mirror of https://gitee.com/openkylin/linux.git
Merge tag 'gvt-next-2017-08-15' of https://github.com/01org/gvt-linux into drm-intel-next-queued
gvt-next-2017-08-15

gvt update for 4.14
- MMIO save/restore optimization (Changbin)
- Split workload scan vs. dispatch for more parallel exec (Ping)
- vGPU full 48bit ppgtt support (Joonas, Tina)
- vGPU hw id expose for perf (Zhenyu)
- other misc cleanup and fixes

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20170815023940.skhjfcsyrao7axqi@zhen-hp.sh.intel.com
commit baa68f6e5b
@@ -285,8 +285,8 @@ static int alloc_resource(struct intel_vgpu *vgpu,
 	return 0;
 
 no_enough_resource:
-	gvt_vgpu_err("fail to allocate resource %s\n", item);
-	gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+	gvt_err("fail to allocate resource %s\n", item);
+	gvt_err("request %luMB avail %luMB max %luMB taken %luMB\n",
 		BYTES_TO_MB(request), BYTES_TO_MB(avail),
 		BYTES_TO_MB(max), BYTES_TO_MB(taken));
 	return -ENOSPC;
@@ -1382,13 +1382,13 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
 			ret = -EINVAL;
 			goto err;
 		}
-	} else if ((!vgpu_gmadr_is_valid(s->vgpu, guest_gma)) ||
-			(!vgpu_gmadr_is_valid(s->vgpu,
-				guest_gma + op_size - 1))) {
+	} else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
 		ret = -EINVAL;
 		goto err;
 	}
 
 	return 0;
 
 err:
 	gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
 			s->info->name, guest_gma, op_size);
@@ -2647,7 +2647,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	return 0;
 }
 
-int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
+int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
 {
 	int ret;
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -42,7 +42,7 @@ void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
 
 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
 
-int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload);
 
 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
 
@@ -605,6 +605,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	struct list_head *q = workload_q_head(vgpu, ring_id);
 	struct intel_vgpu_workload *last_workload = get_last_workload(q);
 	struct intel_vgpu_workload *workload = NULL;
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	u64 ring_context_gpa;
 	u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
 	int ret;
@@ -668,6 +669,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 	workload->complete = complete_execlist_workload;
 	workload->status = -EINPROGRESS;
 	workload->emulate_schedule_in = emulate_schedule_in;
+	workload->shadowed = false;
 
 	if (ring_id == RCS) {
 		intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -701,6 +703,17 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
 		return ret;
 	}
 
+	/* Only scan and shadow the first workload in the queue
+	 * as there is only one pre-allocated buf-obj for shadow.
+	 */
+	if (list_empty(workload_q_head(vgpu, ring_id))) {
+		intel_runtime_pm_get(dev_priv);
+		mutex_lock(&dev_priv->drm.struct_mutex);
+		intel_gvt_scan_and_shadow_workload(workload);
+		mutex_unlock(&dev_priv->drm.struct_mutex);
+		intel_runtime_pm_put(dev_priv);
+	}
+
 	queue_workload(workload);
 	return 0;
 }
@@ -783,6 +796,8 @@ static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
 			list_del_init(&pos->list);
 			free_workload(pos);
 		}
+
+		clear_bit(engine->id, vgpu->shadow_ctx_desc_updated);
 	}
 }
 
@ -259,7 +259,7 @@ static void write_pte64(struct drm_i915_private *dev_priv,
|
||||||
writeq(pte, addr);
|
writeq(pte, addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
|
static inline int gtt_get_entry64(void *pt,
|
||||||
struct intel_gvt_gtt_entry *e,
|
struct intel_gvt_gtt_entry *e,
|
||||||
unsigned long index, bool hypervisor_access, unsigned long gpa,
|
unsigned long index, bool hypervisor_access, unsigned long gpa,
|
||||||
struct intel_vgpu *vgpu)
|
struct intel_vgpu *vgpu)
|
||||||
|
@ -268,22 +268,23 @@ static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (WARN_ON(info->gtt_entry_size != 8))
|
if (WARN_ON(info->gtt_entry_size != 8))
|
||||||
return e;
|
return -EINVAL;
|
||||||
|
|
||||||
if (hypervisor_access) {
|
if (hypervisor_access) {
|
||||||
ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
|
ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
|
||||||
(index << info->gtt_entry_size_shift),
|
(index << info->gtt_entry_size_shift),
|
||||||
&e->val64, 8);
|
&e->val64, 8);
|
||||||
WARN_ON(ret);
|
if (WARN_ON(ret))
|
||||||
|
return ret;
|
||||||
} else if (!pt) {
|
} else if (!pt) {
|
||||||
e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
|
e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
|
||||||
} else {
|
} else {
|
||||||
e->val64 = *((u64 *)pt + index);
|
e->val64 = *((u64 *)pt + index);
|
||||||
}
|
}
|
||||||
return e;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
|
static inline int gtt_set_entry64(void *pt,
|
||||||
struct intel_gvt_gtt_entry *e,
|
struct intel_gvt_gtt_entry *e,
|
||||||
unsigned long index, bool hypervisor_access, unsigned long gpa,
|
unsigned long index, bool hypervisor_access, unsigned long gpa,
|
||||||
struct intel_vgpu *vgpu)
|
struct intel_vgpu *vgpu)
|
||||||
|
@ -292,19 +293,20 @@ static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
if (WARN_ON(info->gtt_entry_size != 8))
|
if (WARN_ON(info->gtt_entry_size != 8))
|
||||||
return e;
|
return -EINVAL;
|
||||||
|
|
||||||
if (hypervisor_access) {
|
if (hypervisor_access) {
|
||||||
ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
|
ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
|
||||||
(index << info->gtt_entry_size_shift),
|
(index << info->gtt_entry_size_shift),
|
||||||
&e->val64, 8);
|
&e->val64, 8);
|
||||||
WARN_ON(ret);
|
if (WARN_ON(ret))
|
||||||
|
return ret;
|
||||||
} else if (!pt) {
|
} else if (!pt) {
|
||||||
write_pte64(vgpu->gvt->dev_priv, index, e->val64);
|
write_pte64(vgpu->gvt->dev_priv, index, e->val64);
|
||||||
} else {
|
} else {
|
||||||
*((u64 *)pt + index) = e->val64;
|
*((u64 *)pt + index) = e->val64;
|
||||||
}
|
}
|
||||||
return e;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#define GTT_HAW 46
|
#define GTT_HAW 46
|
||||||
|
@ -445,21 +447,25 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
|
||||||
/*
|
/*
|
||||||
* MM helpers.
|
* MM helpers.
|
||||||
*/
|
*/
|
||||||
struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
|
int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
|
||||||
void *page_table, struct intel_gvt_gtt_entry *e,
|
void *page_table, struct intel_gvt_gtt_entry *e,
|
||||||
unsigned long index)
|
unsigned long index)
|
||||||
{
|
{
|
||||||
struct intel_gvt *gvt = mm->vgpu->gvt;
|
struct intel_gvt *gvt = mm->vgpu->gvt;
|
||||||
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
||||||
|
int ret;
|
||||||
|
|
||||||
e->type = mm->page_table_entry_type;
|
e->type = mm->page_table_entry_type;
|
||||||
|
|
||||||
ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
|
ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
ops->test_pse(e);
|
ops->test_pse(e);
|
||||||
return e;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
|
int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
|
||||||
void *page_table, struct intel_gvt_gtt_entry *e,
|
void *page_table, struct intel_gvt_gtt_entry *e,
|
||||||
unsigned long index)
|
unsigned long index)
|
||||||
{
|
{
|
||||||
|
@ -472,7 +478,7 @@ struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
|
||||||
/*
|
/*
|
||||||
* PPGTT shadow page table helpers.
|
* PPGTT shadow page table helpers.
|
||||||
*/
|
*/
|
||||||
static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
|
static inline int ppgtt_spt_get_entry(
|
||||||
struct intel_vgpu_ppgtt_spt *spt,
|
struct intel_vgpu_ppgtt_spt *spt,
|
||||||
void *page_table, int type,
|
void *page_table, int type,
|
||||||
struct intel_gvt_gtt_entry *e, unsigned long index,
|
struct intel_gvt_gtt_entry *e, unsigned long index,
|
||||||
|
@ -480,20 +486,24 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
|
||||||
{
|
{
|
||||||
struct intel_gvt *gvt = spt->vgpu->gvt;
|
struct intel_gvt *gvt = spt->vgpu->gvt;
|
||||||
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
||||||
|
int ret;
|
||||||
|
|
||||||
e->type = get_entry_type(type);
|
e->type = get_entry_type(type);
|
||||||
|
|
||||||
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
|
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
|
||||||
return e;
|
return -EINVAL;
|
||||||
|
|
||||||
ops->get_entry(page_table, e, index, guest,
|
ret = ops->get_entry(page_table, e, index, guest,
|
||||||
spt->guest_page.gfn << GTT_PAGE_SHIFT,
|
spt->guest_page.gfn << GTT_PAGE_SHIFT,
|
||||||
spt->vgpu);
|
spt->vgpu);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
ops->test_pse(e);
|
ops->test_pse(e);
|
||||||
return e;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
|
static inline int ppgtt_spt_set_entry(
|
||||||
struct intel_vgpu_ppgtt_spt *spt,
|
struct intel_vgpu_ppgtt_spt *spt,
|
||||||
void *page_table, int type,
|
void *page_table, int type,
|
||||||
struct intel_gvt_gtt_entry *e, unsigned long index,
|
struct intel_gvt_gtt_entry *e, unsigned long index,
|
||||||
|
@ -503,7 +513,7 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
|
||||||
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
|
||||||
|
|
||||||
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
|
if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
|
||||||
return e;
|
return -EINVAL;
|
||||||
|
|
||||||
return ops->set_entry(page_table, e, index, guest,
|
return ops->set_entry(page_table, e, index, guest,
|
||||||
spt->guest_page.gfn << GTT_PAGE_SHIFT,
|
spt->guest_page.gfn << GTT_PAGE_SHIFT,
|
||||||
|
@ -792,13 +802,13 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
|
||||||
|
|
||||||
#define for_each_present_guest_entry(spt, e, i) \
|
#define for_each_present_guest_entry(spt, e, i) \
|
||||||
for (i = 0; i < pt_entries(spt); i++) \
|
for (i = 0; i < pt_entries(spt); i++) \
|
||||||
if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
|
if (!ppgtt_get_guest_entry(spt, e, i) && \
|
||||||
ppgtt_get_guest_entry(spt, e, i)))
|
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
|
||||||
|
|
||||||
#define for_each_present_shadow_entry(spt, e, i) \
|
#define for_each_present_shadow_entry(spt, e, i) \
|
||||||
for (i = 0; i < pt_entries(spt); i++) \
|
for (i = 0; i < pt_entries(spt); i++) \
|
||||||
if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
|
if (!ppgtt_get_shadow_entry(spt, e, i) && \
|
||||||
ppgtt_get_shadow_entry(spt, e, i)))
|
spt->vgpu->gvt->gtt.pte_ops->test_present(e))
|
||||||
|
|
||||||
static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
||||||
{
|
{
|
||||||
|
@ -979,29 +989,26 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
|
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
|
||||||
unsigned long index)
|
struct intel_gvt_gtt_entry *se, unsigned long index)
|
||||||
{
|
{
|
||||||
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
|
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
|
||||||
struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
|
struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
|
||||||
struct intel_vgpu *vgpu = spt->vgpu;
|
struct intel_vgpu *vgpu = spt->vgpu;
|
||||||
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
||||||
struct intel_gvt_gtt_entry e;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ppgtt_get_shadow_entry(spt, &e, index);
|
trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, se->val64,
|
||||||
|
|
||||||
trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
|
|
||||||
index);
|
index);
|
||||||
|
|
||||||
if (!ops->test_present(&e))
|
if (!ops->test_present(se))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
|
if (ops->get_pfn(se) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (gtt_type_is_pt(get_next_pt_type(e.type))) {
|
if (gtt_type_is_pt(get_next_pt_type(se->type))) {
|
||||||
struct intel_vgpu_ppgtt_spt *s =
|
struct intel_vgpu_ppgtt_spt *s =
|
||||||
ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
|
ppgtt_find_shadow_page(vgpu, ops->get_pfn(se));
|
||||||
if (!s) {
|
if (!s) {
|
||||||
gvt_vgpu_err("fail to find guest page\n");
|
gvt_vgpu_err("fail to find guest page\n");
|
||||||
ret = -ENXIO;
|
ret = -ENXIO;
|
||||||
|
@ -1011,12 +1018,10 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
|
||||||
if (ret)
|
if (ret)
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
|
|
||||||
ppgtt_set_shadow_entry(spt, &e, index);
|
|
||||||
return 0;
|
return 0;
|
||||||
fail:
|
fail:
|
||||||
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
|
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
|
||||||
spt, e.val64, e.type);
|
spt, se->val64, se->type);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1236,22 +1241,37 @@ static int ppgtt_handle_guest_write_page_table(
|
||||||
{
|
{
|
||||||
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
|
struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
|
||||||
struct intel_vgpu *vgpu = spt->vgpu;
|
struct intel_vgpu *vgpu = spt->vgpu;
|
||||||
|
int type = spt->shadow_page.type;
|
||||||
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
||||||
|
struct intel_gvt_gtt_entry se;
|
||||||
|
|
||||||
int ret;
|
int ret;
|
||||||
int new_present;
|
int new_present;
|
||||||
|
|
||||||
new_present = ops->test_present(we);
|
new_present = ops->test_present(we);
|
||||||
|
|
||||||
ret = ppgtt_handle_guest_entry_removal(gpt, index);
|
/*
|
||||||
if (ret)
|
* Adding the new entry first and then removing the old one, that can
|
||||||
goto fail;
|
* guarantee the ppgtt table is validated during the window between
|
||||||
|
* adding and removal.
|
||||||
|
*/
|
||||||
|
ppgtt_get_shadow_entry(spt, &se, index);
|
||||||
|
|
||||||
if (new_present) {
|
if (new_present) {
|
||||||
ret = ppgtt_handle_guest_entry_add(gpt, we, index);
|
ret = ppgtt_handle_guest_entry_add(gpt, we, index);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto fail;
|
goto fail;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
|
||||||
|
if (ret)
|
||||||
|
goto fail;
|
||||||
|
|
||||||
|
if (!new_present) {
|
||||||
|
ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
|
||||||
|
ppgtt_set_shadow_entry(spt, &se, index);
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
fail:
|
fail:
|
||||||
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
|
gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
|
||||||
|
@ -1323,7 +1343,7 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
|
||||||
struct intel_vgpu *vgpu = spt->vgpu;
|
struct intel_vgpu *vgpu = spt->vgpu;
|
||||||
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
|
||||||
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
|
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
|
||||||
struct intel_gvt_gtt_entry we;
|
struct intel_gvt_gtt_entry we, se;
|
||||||
unsigned long index;
|
unsigned long index;
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
@ -1339,7 +1359,8 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
|
||||||
return ret;
|
return ret;
|
||||||
} else {
|
} else {
|
||||||
if (!test_bit(index, spt->post_shadow_bitmap)) {
|
if (!test_bit(index, spt->post_shadow_bitmap)) {
|
||||||
ret = ppgtt_handle_guest_entry_removal(gpt, index);
|
ppgtt_get_shadow_entry(spt, &se, index);
|
||||||
|
ret = ppgtt_handle_guest_entry_removal(gpt, &se, index);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
@ -1713,8 +1734,10 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
|
||||||
if (!vgpu_gmadr_is_valid(vgpu, gma))
|
if (!vgpu_gmadr_is_valid(vgpu, gma))
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
ggtt_get_guest_entry(mm, &e,
|
ret = ggtt_get_guest_entry(mm, &e,
|
||||||
gma_ops->gma_to_ggtt_pte_index(gma));
|
gma_ops->gma_to_ggtt_pte_index(gma));
|
||||||
|
if (ret)
|
||||||
|
goto err;
|
||||||
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
|
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
|
||||||
+ (gma & ~GTT_PAGE_MASK);
|
+ (gma & ~GTT_PAGE_MASK);
|
||||||
|
|
||||||
|
@ -1724,7 +1747,9 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
|
||||||
|
|
||||||
switch (mm->page_table_level) {
|
switch (mm->page_table_level) {
|
||||||
case 4:
|
case 4:
|
||||||
ppgtt_get_shadow_root_entry(mm, &e, 0);
|
ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
|
||||||
|
if (ret)
|
||||||
|
goto err;
|
||||||
gma_index[0] = gma_ops->gma_to_pml4_index(gma);
|
gma_index[0] = gma_ops->gma_to_pml4_index(gma);
|
||||||
gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
|
gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
|
||||||
gma_index[2] = gma_ops->gma_to_pde_index(gma);
|
gma_index[2] = gma_ops->gma_to_pde_index(gma);
|
||||||
|
@ -1732,15 +1757,19 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
|
||||||
index = 4;
|
index = 4;
|
||||||
break;
|
break;
|
||||||
case 3:
|
case 3:
|
||||||
ppgtt_get_shadow_root_entry(mm, &e,
|
ret = ppgtt_get_shadow_root_entry(mm, &e,
|
||||||
gma_ops->gma_to_l3_pdp_index(gma));
|
gma_ops->gma_to_l3_pdp_index(gma));
|
||||||
|
if (ret)
|
||||||
|
goto err;
|
||||||
gma_index[0] = gma_ops->gma_to_pde_index(gma);
|
gma_index[0] = gma_ops->gma_to_pde_index(gma);
|
||||||
gma_index[1] = gma_ops->gma_to_pte_index(gma);
|
gma_index[1] = gma_ops->gma_to_pte_index(gma);
|
||||||
index = 2;
|
index = 2;
|
||||||
break;
|
break;
|
||||||
case 2:
|
case 2:
|
||||||
ppgtt_get_shadow_root_entry(mm, &e,
|
ret = ppgtt_get_shadow_root_entry(mm, &e,
|
||||||
gma_ops->gma_to_pde_index(gma));
|
gma_ops->gma_to_pde_index(gma));
|
||||||
|
if (ret)
|
||||||
|
goto err;
|
||||||
gma_index[0] = gma_ops->gma_to_pte_index(gma);
|
gma_index[0] = gma_ops->gma_to_pte_index(gma);
|
||||||
index = 1;
|
index = 1;
|
||||||
break;
|
break;
|
||||||
|
@ -1755,6 +1784,11 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
|
||||||
(i == index - 1));
|
(i == index - 1));
|
||||||
if (ret)
|
if (ret)
|
||||||
goto err;
|
goto err;
|
||||||
|
|
||||||
|
if (!pte_ops->test_present(&e)) {
|
||||||
|
gvt_dbg_core("GMA 0x%lx is not present\n", gma);
|
||||||
|
goto err;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
|
gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
|
||||||
|
@@ -2329,13 +2363,12 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
 /**
  * intel_vgpu_reset_gtt - reset the all GTT related status
  * @vgpu: a vGPU
- * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
  *
  * This function is called from vfio core to reset reset all
  * GTT related status, including GGTT, PPGTT, scratch page.
  *
  */
-void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
 {
 	int i;
 
@@ -2347,9 +2380,6 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
 	 */
 	intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
 
-	if (!dmlr)
-		return;
-
 	intel_vgpu_reset_ggtt(vgpu);
 
 	/* clear scratch page for security */
@ -49,14 +49,18 @@ struct intel_gvt_gtt_entry {
|
||||||
};
|
};
|
||||||
|
|
||||||
struct intel_gvt_gtt_pte_ops {
|
struct intel_gvt_gtt_pte_ops {
|
||||||
struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
|
int (*get_entry)(void *pt,
|
||||||
struct intel_gvt_gtt_entry *e,
|
struct intel_gvt_gtt_entry *e,
|
||||||
unsigned long index, bool hypervisor_access, unsigned long gpa,
|
unsigned long index,
|
||||||
struct intel_vgpu *vgpu);
|
bool hypervisor_access,
|
||||||
struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
|
unsigned long gpa,
|
||||||
struct intel_gvt_gtt_entry *e,
|
struct intel_vgpu *vgpu);
|
||||||
unsigned long index, bool hypervisor_access, unsigned long gpa,
|
int (*set_entry)(void *pt,
|
||||||
struct intel_vgpu *vgpu);
|
struct intel_gvt_gtt_entry *e,
|
||||||
|
unsigned long index,
|
||||||
|
bool hypervisor_access,
|
||||||
|
unsigned long gpa,
|
||||||
|
struct intel_vgpu *vgpu);
|
||||||
bool (*test_present)(struct intel_gvt_gtt_entry *e);
|
bool (*test_present)(struct intel_gvt_gtt_entry *e);
|
||||||
void (*clear_present)(struct intel_gvt_gtt_entry *e);
|
void (*clear_present)(struct intel_gvt_gtt_entry *e);
|
||||||
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
|
bool (*test_pse)(struct intel_gvt_gtt_entry *e);
|
||||||
|
@ -143,12 +147,12 @@ struct intel_vgpu_mm {
|
||||||
struct intel_vgpu *vgpu;
|
struct intel_vgpu *vgpu;
|
||||||
};
|
};
|
||||||
|
|
||||||
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
|
extern int intel_vgpu_mm_get_entry(
|
||||||
struct intel_vgpu_mm *mm,
|
struct intel_vgpu_mm *mm,
|
||||||
void *page_table, struct intel_gvt_gtt_entry *e,
|
void *page_table, struct intel_gvt_gtt_entry *e,
|
||||||
unsigned long index);
|
unsigned long index);
|
||||||
|
|
||||||
extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
|
extern int intel_vgpu_mm_set_entry(
|
||||||
struct intel_vgpu_mm *mm,
|
struct intel_vgpu_mm *mm,
|
||||||
void *page_table, struct intel_gvt_gtt_entry *e,
|
void *page_table, struct intel_gvt_gtt_entry *e,
|
||||||
unsigned long index);
|
unsigned long index);
|
||||||
|
@@ -208,7 +212,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
-extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 
 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
@@ -167,6 +167,7 @@ struct intel_vgpu {
 	atomic_t running_workload_num;
 	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
 	struct i915_gem_context *shadow_ctx;
+	DECLARE_BITMAP(shadow_ctx_desc_updated, I915_NUM_ENGINES);
 
 #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
 	struct {
@@ -470,6 +471,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
 void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
+int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
+
 struct intel_gvt_ops {
 	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
 			unsigned int);
@@ -113,9 +113,17 @@ static int new_mmio_info(struct intel_gvt *gvt,
 
 		info->offset = i;
 		p = find_mmio_info(gvt, info->offset);
-		if (p)
-			gvt_err("dup mmio definition offset %x\n",
+		if (p) {
+			WARN(1, "dup mmio definition offset %x\n",
 				info->offset);
+			kfree(info);
+
+			/* We return -EEXIST here to make GVT-g load fail.
+			 * So duplicated MMIO can be found as soon as
+			 * possible.
+			 */
+			return -EEXIST;
+		}
 
 		info->ro_mask = ro_mask;
 		info->device = device;
@ -2583,7 +2591,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
|
||||||
MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
|
MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
|
||||||
NULL, force_nonpriv_write);
|
NULL, force_nonpriv_write);
|
||||||
|
|
||||||
MMIO_D(0x22040, D_BDW_PLUS);
|
|
||||||
MMIO_D(0x44484, D_BDW_PLUS);
|
MMIO_D(0x44484, D_BDW_PLUS);
|
||||||
MMIO_D(0x4448c, D_BDW_PLUS);
|
MMIO_D(0x4448c, D_BDW_PLUS);
|
||||||
|
|
||||||
|
@ -2641,7 +2648,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||||
MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
|
MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
|
||||||
MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
|
MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
|
||||||
skl_power_well_ctl_write);
|
skl_power_well_ctl_write);
|
||||||
MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);
|
|
||||||
|
|
||||||
MMIO_D(0xa210, D_SKL_PLUS);
|
MMIO_D(0xa210, D_SKL_PLUS);
|
||||||
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
|
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
|
||||||
|
@ -2833,7 +2839,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||||
MMIO_D(0x320f0, D_SKL | D_KBL);
|
MMIO_D(0x320f0, D_SKL | D_KBL);
|
||||||
|
|
||||||
MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
|
MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
|
||||||
MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
|
|
||||||
MMIO_D(0x70034, D_SKL_PLUS);
|
MMIO_D(0x70034, D_SKL_PLUS);
|
||||||
MMIO_D(0x71034, D_SKL_PLUS);
|
MMIO_D(0x71034, D_SKL_PLUS);
|
||||||
MMIO_D(0x72034, D_SKL_PLUS);
|
MMIO_D(0x72034, D_SKL_PLUS);
|
||||||
|
@ -2851,10 +2856,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
|
||||||
NULL, NULL);
|
NULL, NULL);
|
||||||
|
|
||||||
MMIO_D(0x4ab8, D_KBL);
|
MMIO_D(0x4ab8, D_KBL);
|
||||||
MMIO_D(0x940c, D_SKL_PLUS);
|
|
||||||
MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
|
MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
|
||||||
MMIO_D(0x4ab0, D_SKL | D_KBL);
|
|
||||||
MMIO_D(0x20d4, D_SKL | D_KBL);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -1170,10 +1170,27 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr,
 	return sprintf(buf, "\n");
 }
 
+static ssize_t
+hw_id_show(struct device *dev, struct device_attribute *attr,
+	   char *buf)
+{
+	struct mdev_device *mdev = mdev_from_dev(dev);
+
+	if (mdev) {
+		struct intel_vgpu *vgpu = (struct intel_vgpu *)
+			mdev_get_drvdata(mdev);
+		return sprintf(buf, "%u\n",
+			       vgpu->shadow_ctx->hw_id);
+	}
+	return sprintf(buf, "\n");
+}
+
 static DEVICE_ATTR_RO(vgpu_id);
+static DEVICE_ATTR_RO(hw_id);
 
 static struct attribute *intel_vgpu_attrs[] = {
 	&dev_attr_vgpu_id.attr,
+	&dev_attr_hw_id.attr,
 	NULL
 };
 
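The hunk above exposes the vGPU shadow context's hardware ID as a read-only hw_id attribute so it can be matched against i915 perf output. A minimal user-space sketch of reading it follows; the sysfs path layout ("intel_vgpu" group under the mdev device) is an assumption based on the usual mdev conventions, not something stated in this diff.

/*
 * Illustrative only: read a vGPU's hw_id so it can be correlated with
 * i915 perf/OA data.  The path below assumes the common mdev sysfs
 * layout; adjust it to the actual layout on the target kernel.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[256];
	unsigned int hw_id;
	FILE *f;

	if (argc < 2)
		return 1;

	/* argv[1]: mdev UUID of the vGPU (see /sys/bus/mdev/devices/) */
	snprintf(path, sizeof(path),
		 "/sys/bus/mdev/devices/%s/intel_vgpu/hw_id", argv[1]);

	f = fopen(path, "r");
	if (!f)
		return 1;
	if (fscanf(f, "%u", &hw_id) == 1)
		printf("vGPU shadow context hw_id: %u\n", hw_id);
	fclose(f);
	return 0;
}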
@@ -207,18 +207,16 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
-		gen9_render_mocs[ring_id][i] = I915_READ(offset);
+		gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
 		I915_WRITE(offset, vgpu_vreg(vgpu, offset));
-		POSTING_READ(offset);
 		offset.reg += 4;
 	}
 
 	if (ring_id == RCS) {
 		l3_offset.reg = 0xb020;
 		for (i = 0; i < 32; i++) {
-			gen9_render_mocs_L3[i] = I915_READ(l3_offset);
-			I915_WRITE(l3_offset, vgpu_vreg(vgpu, l3_offset));
-			POSTING_READ(l3_offset);
+			gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
+			I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
 			l3_offset.reg += 4;
 		}
 	}
@@ -242,18 +240,16 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
-		vgpu_vreg(vgpu, offset) = I915_READ(offset);
-		I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
-		POSTING_READ(offset);
+		vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
+		I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
 		offset.reg += 4;
 	}
 
 	if (ring_id == RCS) {
 		l3_offset.reg = 0xb020;
 		for (i = 0; i < 32; i++) {
-			vgpu_vreg(vgpu, l3_offset) = I915_READ(l3_offset);
-			I915_WRITE(l3_offset, gen9_render_mocs_L3[i]);
-			POSTING_READ(l3_offset);
+			vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
+			I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
 			l3_offset.reg += 4;
 		}
 	}
@ -272,6 +268,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
|
||||||
u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
|
u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
|
||||||
u32 inhibit_mask =
|
u32 inhibit_mask =
|
||||||
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
|
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
|
||||||
|
i915_reg_t last_reg = _MMIO(0);
|
||||||
|
|
||||||
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
|
if (IS_SKYLAKE(vgpu->gvt->dev_priv)
|
||||||
|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
|
|| IS_KABYLAKE(vgpu->gvt->dev_priv)) {
|
||||||
|
@ -287,7 +284,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
|
||||||
if (mmio->ring_id != ring_id)
|
if (mmio->ring_id != ring_id)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
mmio->value = I915_READ(mmio->reg);
|
mmio->value = I915_READ_FW(mmio->reg);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* if it is an inhibit context, load in_context mmio
|
* if it is an inhibit context, load in_context mmio
|
||||||
|
@ -304,13 +301,18 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
|
||||||
else
|
else
|
||||||
v = vgpu_vreg(vgpu, mmio->reg);
|
v = vgpu_vreg(vgpu, mmio->reg);
|
||||||
|
|
||||||
I915_WRITE(mmio->reg, v);
|
I915_WRITE_FW(mmio->reg, v);
|
||||||
POSTING_READ(mmio->reg);
|
last_reg = mmio->reg;
|
||||||
|
|
||||||
trace_render_mmio(vgpu->id, "load",
|
trace_render_mmio(vgpu->id, "load",
|
||||||
i915_mmio_reg_offset(mmio->reg),
|
i915_mmio_reg_offset(mmio->reg),
|
||||||
mmio->value, v);
|
mmio->value, v);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Make sure the swiched MMIOs has taken effect. */
|
||||||
|
if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
|
||||||
|
I915_READ_FW(last_reg);
|
||||||
|
|
||||||
handle_tlb_pending_event(vgpu, ring_id);
|
handle_tlb_pending_event(vgpu, ring_id);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -319,6 +321,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
|
||||||
{
|
{
|
||||||
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
|
||||||
struct render_mmio *mmio;
|
struct render_mmio *mmio;
|
||||||
|
i915_reg_t last_reg = _MMIO(0);
|
||||||
u32 v;
|
u32 v;
|
||||||
int i, array_size;
|
int i, array_size;
|
||||||
|
|
||||||
|
@ -335,7 +338,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
|
||||||
if (mmio->ring_id != ring_id)
|
if (mmio->ring_id != ring_id)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
vgpu_vreg(vgpu, mmio->reg) = I915_READ(mmio->reg);
|
vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
|
||||||
|
|
||||||
if (mmio->mask) {
|
if (mmio->mask) {
|
||||||
vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
|
vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
|
||||||
|
@ -346,13 +349,17 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
|
||||||
if (mmio->in_context)
|
if (mmio->in_context)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
I915_WRITE(mmio->reg, v);
|
I915_WRITE_FW(mmio->reg, v);
|
||||||
POSTING_READ(mmio->reg);
|
last_reg = mmio->reg;
|
||||||
|
|
||||||
trace_render_mmio(vgpu->id, "restore",
|
trace_render_mmio(vgpu->id, "restore",
|
||||||
i915_mmio_reg_offset(mmio->reg),
|
i915_mmio_reg_offset(mmio->reg),
|
||||||
mmio->value, v);
|
mmio->value, v);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* Make sure the swiched MMIOs has taken effect. */
|
||||||
|
if (likely(INTEL_GVT_MMIO_OFFSET(last_reg)))
|
||||||
|
I915_READ_FW(last_reg);
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -367,12 +374,23 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
|
||||||
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
|
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
|
||||||
struct intel_vgpu *next, int ring_id)
|
struct intel_vgpu *next, int ring_id)
|
||||||
{
|
{
|
||||||
|
struct drm_i915_private *dev_priv;
|
||||||
|
|
||||||
if (WARN_ON(!pre && !next))
|
if (WARN_ON(!pre && !next))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
|
gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
|
||||||
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
|
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
|
||||||
|
|
||||||
|
dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* We are using raw mmio access wrapper to improve the
|
||||||
|
* performace for batch mmio read/write, so we need
|
||||||
|
* handle forcewake mannually.
|
||||||
|
*/
|
||||||
|
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* TODO: Optimize for vGPU to vGPU switch by merging
|
* TODO: Optimize for vGPU to vGPU switch by merging
|
||||||
* switch_mmio_to_host() and switch_mmio_to_vgpu().
|
* switch_mmio_to_host() and switch_mmio_to_vgpu().
|
||||||
|
@ -382,4 +400,6 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
|
||||||
|
|
||||||
if (next)
|
if (next)
|
||||||
switch_mmio_to_vgpu(next, ring_id);
|
switch_mmio_to_vgpu(next, ring_id);
|
||||||
|
|
||||||
|
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
|
||||||
}
|
}
|
||||||
|
|
|
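Taken together, the render MMIO hunks switch the context-switch path from per-register I915_READ/I915_WRITE plus POSTING_READ to raw _FW accessors issued under a single explicit forcewake get/put, with one posting read at the end of the batch. A condensed sketch of that pattern is below; switch_mmio_batch() itself is a hypothetical helper for illustration, only the i915/GVT calls it uses appear in the diff.

/*
 * Sketch of the batched raw-MMIO pattern used above: take forcewake once,
 * issue the whole batch with the _FW accessors (no implicit forcewake, no
 * per-register posting read), then read one register back so the writes
 * are known to have reached the hardware.
 */
static void switch_mmio_batch(struct drm_i915_private *dev_priv,
			      const i915_reg_t *regs, const u32 *values,
			      int count)
{
	i915_reg_t last_reg = _MMIO(0);
	int i;

	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	for (i = 0; i < count; i++) {
		I915_WRITE_FW(regs[i], values[i]);
		last_reg = regs[i];
	}

	/* One posting read for the whole batch instead of one per write. */
	if (INTEL_GVT_MMIO_OFFSET(last_reg))
		I915_READ_FW(last_reg);

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}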
@ -184,41 +184,52 @@ static int shadow_context_status_change(struct notifier_block *nb,
|
||||||
return NOTIFY_OK;
|
return NOTIFY_OK;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dispatch_workload(struct intel_vgpu_workload *workload)
|
static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
|
||||||
|
struct intel_engine_cs *engine)
|
||||||
|
{
|
||||||
|
struct intel_context *ce = &ctx->engine[engine->id];
|
||||||
|
u64 desc = 0;
|
||||||
|
|
||||||
|
desc = ce->lrc_desc;
|
||||||
|
|
||||||
|
/* Update bits 0-11 of the context descriptor which includes flags
|
||||||
|
* like GEN8_CTX_* cached in desc_template
|
||||||
|
*/
|
||||||
|
desc &= U64_MAX << 12;
|
||||||
|
desc |= ctx->desc_template & ((1ULL << 12) - 1);
|
||||||
|
|
||||||
|
ce->lrc_desc = desc;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
|
||||||
|
* shadow it as well, include ringbuffer,wa_ctx and ctx.
|
||||||
|
* @workload: an abstract entity for each execlist submission.
|
||||||
|
*
|
||||||
|
* This function is called before the workload submitting to i915, to make
|
||||||
|
* sure the content of the workload is valid.
|
||||||
|
*/
|
||||||
|
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
|
||||||
{
|
{
|
||||||
int ring_id = workload->ring_id;
|
int ring_id = workload->ring_id;
|
||||||
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
|
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
|
||||||
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
|
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
|
||||||
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
|
|
||||||
struct drm_i915_gem_request *rq;
|
struct drm_i915_gem_request *rq;
|
||||||
struct intel_vgpu *vgpu = workload->vgpu;
|
struct intel_vgpu *vgpu = workload->vgpu;
|
||||||
struct intel_ring *ring;
|
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
|
lockdep_assert_held(&dev_priv->drm.struct_mutex);
|
||||||
ring_id, workload);
|
|
||||||
|
if (workload->shadowed)
|
||||||
|
return 0;
|
||||||
|
|
||||||
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
|
shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
|
||||||
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
|
shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
|
||||||
GEN8_CTX_ADDRESSING_MODE_SHIFT;
|
GEN8_CTX_ADDRESSING_MODE_SHIFT;
|
||||||
|
|
||||||
mutex_lock(&dev_priv->drm.struct_mutex);
|
if (!test_and_set_bit(ring_id, vgpu->shadow_ctx_desc_updated))
|
||||||
|
shadow_context_descriptor_update(shadow_ctx,
|
||||||
/* pin shadow context by gvt even the shadow context will be pinned
|
dev_priv->engine[ring_id]);
|
||||||
* when i915 alloc request. That is because gvt will update the guest
|
|
||||||
* context from shadow context when workload is completed, and at that
|
|
||||||
* moment, i915 may already unpined the shadow context to make the
|
|
||||||
* shadow_ctx pages invalid. So gvt need to pin itself. After update
|
|
||||||
* the guest context, gvt can unpin the shadow_ctx safely.
|
|
||||||
*/
|
|
||||||
ring = engine->context_pin(engine, shadow_ctx);
|
|
||||||
if (IS_ERR(ring)) {
|
|
||||||
ret = PTR_ERR(ring);
|
|
||||||
gvt_vgpu_err("fail to pin shadow context\n");
|
|
||||||
workload->status = ret;
|
|
||||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
|
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
|
||||||
if (IS_ERR(rq)) {
|
if (IS_ERR(rq)) {
|
||||||
|
@ -231,7 +242,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
|
||||||
|
|
||||||
workload->req = i915_gem_request_get(rq);
|
workload->req = i915_gem_request_get(rq);
|
||||||
|
|
||||||
ret = intel_gvt_scan_and_shadow_workload(workload);
|
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
@ -246,25 +257,61 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
|
workload->shadowed = true;
|
||||||
|
|
||||||
|
out:
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int dispatch_workload(struct intel_vgpu_workload *workload)
|
||||||
|
{
|
||||||
|
int ring_id = workload->ring_id;
|
||||||
|
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
|
||||||
|
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
|
||||||
|
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
|
||||||
|
struct intel_vgpu *vgpu = workload->vgpu;
|
||||||
|
struct intel_ring *ring;
|
||||||
|
int ret = 0;
|
||||||
|
|
||||||
|
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
|
||||||
|
ring_id, workload);
|
||||||
|
|
||||||
|
mutex_lock(&dev_priv->drm.struct_mutex);
|
||||||
|
|
||||||
|
ret = intel_gvt_scan_and_shadow_workload(workload);
|
||||||
|
if (ret)
|
||||||
|
goto out;
|
||||||
|
|
||||||
if (workload->prepare) {
|
if (workload->prepare) {
|
||||||
ret = workload->prepare(workload);
|
ret = workload->prepare(workload);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
|
/* pin shadow context by gvt even the shadow context will be pinned
|
||||||
ring_id, workload->req);
|
* when i915 alloc request. That is because gvt will update the guest
|
||||||
|
* context from shadow context when workload is completed, and at that
|
||||||
|
* moment, i915 may already unpined the shadow context to make the
|
||||||
|
* shadow_ctx pages invalid. So gvt need to pin itself. After update
|
||||||
|
* the guest context, gvt can unpin the shadow_ctx safely.
|
||||||
|
*/
|
||||||
|
ring = engine->context_pin(engine, shadow_ctx);
|
||||||
|
if (IS_ERR(ring)) {
|
||||||
|
ret = PTR_ERR(ring);
|
||||||
|
gvt_vgpu_err("fail to pin shadow context\n");
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
|
|
||||||
ret = 0;
|
|
||||||
workload->dispatched = true;
|
|
||||||
out:
|
out:
|
||||||
if (ret)
|
if (ret)
|
||||||
workload->status = ret;
|
workload->status = ret;
|
||||||
|
|
||||||
if (!IS_ERR_OR_NULL(rq))
|
if (!IS_ERR_OR_NULL(workload->req)) {
|
||||||
i915_add_request(rq);
|
gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
|
||||||
else
|
ring_id, workload->req);
|
||||||
engine->context_unpin(engine, shadow_ctx);
|
i915_add_request(workload->req);
|
||||||
|
workload->dispatched = true;
|
||||||
|
}
|
||||||
|
|
||||||
mutex_unlock(&dev_priv->drm.struct_mutex);
|
mutex_unlock(&dev_priv->drm.struct_mutex);
|
||||||
return ret;
|
return ret;
|
||||||
|
@@ -630,5 +677,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
 
 	vgpu->shadow_ctx->engine[RCS].initialised = true;
 
+	bitmap_zero(vgpu->shadow_ctx_desc_updated, I915_NUM_ENGINES);
+
 	return 0;
 }
@@ -82,6 +82,7 @@ struct intel_vgpu_workload {
 	struct drm_i915_gem_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
+	bool shadowed;
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;
@@ -43,6 +43,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
 	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
 	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
 	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+	vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
 	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
 		vgpu_aperture_gmadr_base(vgpu);
 	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
@@ -502,11 +503,11 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
 
-		intel_vgpu_reset_gtt(vgpu, dmlr);
-
 		/*fence will not be reset during virtual reset */
-		if (dmlr)
+		if (dmlr) {
+			intel_vgpu_reset_gtt(vgpu);
 			intel_vgpu_reset_resource(vgpu);
+		}
 
 		intel_vgpu_reset_mmio(vgpu, dmlr);
 		populate_pvinfo_page(vgpu);
@@ -1905,6 +1905,7 @@ struct i915_workarounds {
 
 struct i915_virtual_gpu {
 	bool active;
+	u32 caps;
 };
 
 /* used in computing the new watermarks state */
@@ -144,9 +144,9 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;
 
 	if (intel_vgpu_active(dev_priv)) {
-		/* emulation is too hard */
+		/* GVT-g has no support for 32bit ppgtt */
 		has_full_ppgtt = false;
-		has_full_48bit_ppgtt = false;
+		has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
 	}
 
 	if (!has_aliasing_ppgtt)
@@ -180,10 +180,15 @@ int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
 		return 0;
 	}
 
-	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
-		return has_full_48bit_ppgtt ? 3 : 2;
-	else
-		return has_aliasing_ppgtt ? 1 : 0;
+	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists) {
+		if (has_full_48bit_ppgtt)
+			return 3;
+
+		if (has_full_ppgtt)
+			return 2;
+	}
+
+	return has_aliasing_ppgtt ? 1 : 0;
 }
 
 static int ppgtt_bind_vma(struct i915_vma *vma,
@@ -49,12 +49,18 @@ enum vgt_g2v_type {
 	VGT_G2V_MAX,
 };
 
+/*
+ * VGT capabilities type
+ */
+#define VGT_CAPS_FULL_48BIT_PPGTT	BIT(2)
+
 struct vgt_if {
 	u64 magic;		/* VGT_MAGIC */
 	u16 version_major;
 	u16 version_minor;
 	u32 vgt_id;		/* ID of vGT instance */
-	u32 rsv1[12];		/* pad to offset 0x40 */
+	u32 vgt_caps;		/* VGT capabilities */
+	u32 rsv1[11];		/* pad to offset 0x40 */
 	/*
 	 *  Data structure to describe the balooning info of resources.
 	 *  Each VM can only have one portion of continuous area for now.
@@ -75,10 +75,17 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
 		return;
 	}
 
+	dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps));
+
 	dev_priv->vgpu.active = true;
 	DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
 }
 
+bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT;
+}
+
 struct _balloon_info_ {
 	/*
 	 * There are up to 2 regions per mappable/unmappable graphic
@@ -27,6 +27,9 @@
 #include "i915_pvinfo.h"
 
 void i915_check_vgpu(struct drm_i915_private *dev_priv);
 
+bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
+
 int intel_vgt_balloon(struct drm_i915_private *dev_priv);
 void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
 