Merge tag 'gvt-next-2016-10-27' of https://github.com/01org/gvt-linux into drm-intel-next-queued
gvt-next-2016-10-27

- Resolve remaining build issue with ACPI=n and 32-bit kernels
- TLB invalidation workaround from Arkadiusz
- vGPU reset fix from Ping
- Workload scheduler nested-sleep fix from Changbin
- More misc fixes for sparse warnings and cleanups

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
commit f6c499eca0
@@ -87,6 +87,7 @@ config DRM_I915_USERPTR
 config DRM_I915_GVT
 	bool "Enable Intel GVT-g graphics virtualization host support"
 	depends on DRM_I915
+	depends on 64BIT
 	default n
 	help
 	  Choose this option if you want to enable Intel GVT-g graphics
@@ -1145,7 +1145,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
 		info->event = PRIMARY_B_FLIP_DONE;
 		break;
 	case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
-		info->pipe = PIPE_B;
+		info->pipe = PIPE_C;
 		info->event = PRIMARY_C_FLIP_DONE;
 		break;
 	default:
@@ -1201,20 +1201,19 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 	struct intel_vgpu *vgpu = s->vgpu;
 
-#define write_bits(reg, e, s, v) do { \
-	vgpu_vreg(vgpu, reg) &= ~GENMASK(e, s); \
-	vgpu_vreg(vgpu, reg) |= (v << s); \
-} while (0)
-
-	write_bits(info->surf_reg, 31, 12, info->surf_val);
-	if (IS_SKYLAKE(dev_priv))
-		write_bits(info->stride_reg, 9, 0, info->stride_val);
-	else
-		write_bits(info->stride_reg, 15, 6, info->stride_val);
-	write_bits(info->ctrl_reg, IS_SKYLAKE(dev_priv) ? 12 : 10,
-		   10, info->tile_val);
-
-#undef write_bits
+	set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+		      info->surf_val << 12);
+	if (IS_SKYLAKE(dev_priv)) {
+		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+			      info->stride_val);
+		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+			      info->tile_val << 10);
+	} else {
+		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+			      info->stride_val << 6);
+		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+			      info->tile_val << 10);
+	}
 
 	vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
 	intel_vgpu_trigger_virtual_event(vgpu, info->event);
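The hunk above drops the local write_bits() macro in favour of set_mask_bits() with the caller pre-shifting the value into its field. A minimal userspace sketch of the same read-modify-write semantics; GENMASK and the update_bits helper are re-implemented here for illustration, and the kernel's set_mask_bits() additionally performs the update atomically:

#include <stdint.h>
#include <stdio.h>

/* Same shape as the kernel's GENMASK(h, l): bits h..l set, inclusive. */
#define GENMASK(h, l) \
	(((~0u) << (l)) & (~0u >> (31 - (h))))

/*
 * Non-atomic illustration of set_mask_bits(ptr, mask, bits):
 * clear the masked field, then OR in the (already shifted) new bits.
 */
static void update_bits(uint32_t *reg, uint32_t mask, uint32_t bits)
{
	*reg = (*reg & ~mask) | (bits & mask);
}

int main(void)
{
	uint32_t vreg = 0xdeadbeef;
	uint32_t stride_val = 0x1a;  /* hypothetical stride from the flip command */

	/* Equivalent of: set_mask_bits(&vreg, GENMASK(15, 6), stride_val << 6); */
	update_bits(&vreg, GENMASK(15, 6), stride_val << 6);
	printf("vreg = 0x%08x (bits 15:6 = 0x%x)\n", vreg, (vreg >> 6) & 0x3ff);
	return 0;
}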
@@ -276,7 +276,7 @@ static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
 	pte = readq(addr);
 #else
 	pte = ioread32(addr);
-	pte |= ioread32(addr + 4) << 32;
+	pte |= (u64)ioread32(addr + 4) << 32;
 #endif
 	return pte;
 }
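On 32-bit builds read_pte64() assembles the 64-bit PTE from two ioread32() calls; without the (u64) cast the shift by 32 is applied to a 32-bit value, which is undefined and in practice discards the high half. A small standalone sketch of the corrected composition (the names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Compose a 64-bit PTE from its two 32-bit halves, as read_pte64() does on
 * 32-bit kernels. The cast widens the high dword *before* the shift; shifting
 * a plain 32-bit value by 32 would be undefined behaviour.
 */
static uint64_t compose_pte(uint32_t lo, uint32_t hi)
{
	uint64_t pte = lo;

	pte |= (uint64_t)hi << 32;
	return pte;
}

int main(void)
{
	uint32_t lo = 0x12345673;  /* hypothetical low dword of a GTT entry */
	uint32_t hi = 0x000001ab;  /* hypothetical high dword */

	printf("pte = 0x%016" PRIx64 "\n", compose_pte(lo, hi));
	return 0;
}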
@@ -1944,7 +1944,7 @@ static int create_scratch_page(struct intel_vgpu *vgpu)
 	mfn = intel_gvt_hypervisor_virt_to_mfn(vaddr);
 
 	if (mfn == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("fail to translate vaddr:0x%llx\n", (u64)vaddr);
+		gvt_err("fail to translate vaddr: 0x%p\n", vaddr);
 		__free_page(gtt->scratch_page);
 		gtt->scratch_page = NULL;
 		return -ENXIO;
@@ -65,6 +65,8 @@ struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
  */
 int intel_gvt_init_host(void)
 {
+	int ret;
+
 	if (intel_gvt_host.initialized)
 		return 0;
 
@@ -90,7 +92,8 @@ int intel_gvt_init_host(void)
 		return -EINVAL;
 
 	/* Try to detect if we're running in host instead of VM. */
-	if (!intel_gvt_hypervisor_detect_host())
+	ret = intel_gvt_hypervisor_detect_host();
+	if (ret)
 		return -ENODEV;
 
 	gvt_dbg_core("Running with hypervisor %s in host mode\n",
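The call site now appears to treat intel_gvt_hypervisor_detect_host() as returning 0 on success and a non-zero code on failure instead of a boolean, so the old "if (!...)" test would have had its sense inverted. A tiny sketch of the two conventions; detect_host_bool()/detect_host_errno() are made-up stand-ins for the hook:

#include <stdbool.h>
#include <stdio.h>

/* Old-style hook: true means "running as host". */
static bool detect_host_bool(void) { return true; }

/* New-style hook: 0 means success, a negative errno-style code means failure. */
static int detect_host_errno(void) { return 0; }

int main(void)
{
	int ret;

	/* Boolean convention: failure is the false case. */
	if (!detect_host_bool())
		printf("bool hook: not running as host\n");

	/* Return-code convention: failure is the non-zero case, so the test flips. */
	ret = detect_host_errno();
	if (ret)
		printf("return-code hook: detection failed (%d)\n", ret);
	else
		printf("return-code hook: running as host\n");
	return 0;
}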
@@ -103,19 +106,20 @@ int intel_gvt_init_host(void)
 static void init_device_info(struct intel_gvt *gvt)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
+	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 
 	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
 		info->max_support_vgpus = 8;
 		info->cfg_space_size = 256;
 		info->mmio_size = 2 * 1024 * 1024;
 		info->mmio_bar = 0;
-		info->msi_cap_offset = IS_SKYLAKE(gvt->dev_priv) ? 0xac : 0x90;
 		info->gtt_start_offset = 8 * 1024 * 1024;
 		info->gtt_entry_size = 8;
 		info->gtt_entry_size_shift = 3;
 		info->gmadr_bytes_in_cmd = 8;
 		info->max_surface_size = 36 * 1024 * 1024;
 	}
+	info->msi_cap_offset = pdev->msi_cap;
 }
 
 static int gvt_service_thread(void *data)
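Instead of hard-coding the MSI capability offset per platform (0xac or 0x90), init_device_info() now takes it from pdev->msi_cap, which the PCI core fills in by walking the device's capability list. A small userspace sketch of that walk over a config-space snapshot; the buffer contents are fabricated for illustration:

#include <stdint.h>
#include <stdio.h>

#define PCI_CAPABILITY_LIST 0x34  /* offset of the first-capability pointer */
#define PCI_CAP_ID_MSI      0x05

/*
 * Walk a PCI config-space snapshot and return the offset of a capability,
 * roughly what the PCI core does before caching the result in pdev->msi_cap.
 * Returns 0 if the capability is not present.
 */
static uint8_t find_capability(const uint8_t *cfg, uint8_t cap_id)
{
	uint8_t pos = cfg[PCI_CAPABILITY_LIST];
	int guard = 48;  /* avoid looping forever on a corrupt list */

	while (pos && guard--) {
		if (cfg[pos] == cap_id)
			return pos;
		pos = cfg[pos + 1];  /* "next capability" pointer */
	}
	return 0;
}

int main(void)
{
	uint8_t cfg[256] = {0};

	/* Fabricated example: first capability at 0x90 is MSI, no further caps. */
	cfg[PCI_CAPABILITY_LIST] = 0x90;
	cfg[0x90] = PCI_CAP_ID_MSI;
	cfg[0x91] = 0x00;

	printf("MSI capability at 0x%02x\n", find_capability(cfg, PCI_CAP_ID_MSI));
	return 0;
}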
@@ -382,6 +382,8 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
 
 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
+int setup_vgpu_mmio(struct intel_vgpu *vgpu);
+void populate_pvinfo_page(struct intel_vgpu *vgpu);
 
 #include "mpt.h"
 
@@ -239,7 +239,11 @@ static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
 	vgpu->resetting = true;
 
 	intel_vgpu_stop_schedule(vgpu);
-	if (scheduler->current_vgpu == vgpu) {
+	/*
+	 * The current_vgpu will set to NULL after stopping the
+	 * scheduler when the reset is triggered by current vgpu.
+	 */
+	if (scheduler->current_vgpu == NULL) {
 		mutex_unlock(&vgpu->gvt->lock);
 		intel_gvt_wait_vgpu_idle(vgpu);
 		mutex_lock(&vgpu->gvt->lock);
@@ -247,6 +251,16 @@ static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
 
 	intel_vgpu_reset_execlist(vgpu, bitmap);
 
+	/* full GPU reset */
+	if (bitmap == 0xff) {
+		mutex_unlock(&vgpu->gvt->lock);
+		intel_vgpu_clean_gtt(vgpu);
+		mutex_lock(&vgpu->gvt->lock);
+		setup_vgpu_mmio(vgpu);
+		populate_pvinfo_page(vgpu);
+		intel_vgpu_init_gtt(vgpu);
+	}
+
 	vgpu->resetting = false;
 
 	return 0;
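With this change handle_device_reset() distinguishes a full GPU reset (engine bitmap 0xff) from a per-engine reset: only the full reset tears down and rebuilds the vGPU's GTT, MMIO state and PVINFO page. A compressed sketch of that control flow with stand-in stubs, showing only the shape of the decision rather than the driver's code:

#include <stdint.h>
#include <stdio.h>

/* Stand-in stubs for the per-vGPU teardown/re-init steps. */
static void reset_execlists(uint64_t bitmap)
{
	printf("reset execlists for engine bitmap 0x%llx\n",
	       (unsigned long long)bitmap);
}
static void clean_gtt(void)       { printf("clean GTT\n"); }
static void setup_mmio(void)      { printf("re-init MMIO state\n"); }
static void populate_pvinfo(void) { printf("re-populate PVINFO page\n"); }
static void init_gtt(void)        { printf("re-init GTT\n"); }

/* Per-engine state is always reset; the heavier re-initialisation only
 * happens for a full (all-engines, 0xff) reset. */
static void handle_reset(uint64_t bitmap)
{
	reset_execlists(bitmap);

	if (bitmap == 0xff) {  /* full GPU reset */
		clean_gtt();
		setup_mmio();
		populate_pvinfo();
		init_gtt();
	}
}

int main(void)
{
	handle_reset(0x1);   /* single-engine reset: execlists only */
	handle_reset(0xff);  /* full reset: rebuild vGPU state too */
	return 0;
}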
@@ -258,6 +272,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	u32 data;
+	u64 bitmap = 0;
 
 	write_vreg(vgpu, offset, p_data, bytes);
 	data = vgpu_vreg(vgpu, offset);
 
 	if (data & GEN6_GRDOM_FULL) {
@@ -1305,7 +1320,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	int ring_id = render_mmio_to_ring_id(vgpu->gvt, offset);
 	struct intel_vgpu_execlist *execlist;
 	u32 data = *(u32 *)p_data;
-	int ret;
+	int ret = 0;
 
 	if (WARN_ON(ring_id < 0 || ring_id > I915_NUM_ENGINES - 1))
 		return -EINVAL;
@@ -1313,12 +1328,15 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	execlist = &vgpu->execlist[ring_id];
 
 	execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
-	if (execlist->elsp_dwords.index == 3)
+	if (execlist->elsp_dwords.index == 3) {
 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+		if(ret)
+			gvt_err("fail submit workload on ring %d\n", ring_id);
+	}
 
 	++execlist->elsp_dwords.index;
 	execlist->elsp_dwords.index &= 0x3;
-	return 0;
+	return ret;
 }
 
 static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
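ELSP is programmed as four consecutive dword writes; the emulation accumulates them and only submits on the fourth write (index 3), wrapping the index with "& 0x3". The fix above propagates a failed submission out of the MMIO handler instead of silently returning 0. A standalone sketch of that accumulate-and-submit pattern, with submit_execlist() as a stub returning an error code:

#include <stdint.h>
#include <stdio.h>

struct elsp_dwords {
	uint32_t data[4];
	uint32_t index;
};

/* Stub for the real submission call: 0 on success, negative on error. */
static int submit_execlist(const struct elsp_dwords *elsp)
{
	printf("submit: %08x %08x %08x %08x\n",
	       elsp->data[0], elsp->data[1], elsp->data[2], elsp->data[3]);
	return 0;
}

/* Accumulate one ELSP dword write; kick off submission on the 4th dword and,
 * as in the fixed handler, hand any submission error back to the caller. */
static int elsp_write(struct elsp_dwords *elsp, uint32_t data)
{
	int ret = 0;

	elsp->data[elsp->index] = data;
	if (elsp->index == 3)
		ret = submit_execlist(elsp);

	++elsp->index;
	elsp->index &= 0x3;  /* wrap back to 0 after the 4th write */
	return ret;
}

int main(void)
{
	struct elsp_dwords elsp = {0};
	uint32_t writes[4] = {0x11, 0x22, 0x33, 0x44};

	for (int i = 0; i < 4; i++)
		if (elsp_write(&elsp, writes[i]))
			printf("submission failed\n");
	return 0;
}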
@@ -163,7 +163,7 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
  */
 void intel_gvt_clean_opregion(struct intel_gvt *gvt)
 {
-	iounmap(gvt->opregion.opregion_va);
+	memunmap(gvt->opregion.opregion_va);
 	gvt->opregion.opregion_va = NULL;
 }
 
@@ -181,8 +181,8 @@ int intel_gvt_init_opregion(struct intel_gvt *gvt)
 	pci_read_config_dword(gvt->dev_priv->drm.pdev, INTEL_GVT_PCI_OPREGION,
 			&gvt->opregion.opregion_pa);
 
-	gvt->opregion.opregion_va = acpi_os_ioremap(gvt->opregion.opregion_pa,
-			INTEL_GVT_OPREGION_SIZE);
+	gvt->opregion.opregion_va = memremap(gvt->opregion.opregion_pa,
+			INTEL_GVT_OPREGION_SIZE, MEMREMAP_WB);
 	if (!gvt->opregion.opregion_va) {
 		gvt_err("fail to map host opregion\n");
 		return -EFAULT;
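Mapping the host OpRegion with memremap() instead of acpi_os_ioremap() removes the dependency on ACPI helpers, which is presumably what resolves the ACPI=n build issue mentioned in the pull summary; the matching unmap becomes memunmap(). A minimal kernel-side sketch of the pattern, compilable only in kernel context; the OPREGION_SIZE value and helper names are illustrative, not the driver's:

#include <linux/io.h>
#include <linux/errno.h>

#define OPREGION_SIZE (8 * 1024)  /* illustrative; the driver uses INTEL_GVT_OPREGION_SIZE */

/* Map/unmap a physical OpRegion range without going through ACPI helpers,
 * so the code also builds with CONFIG_ACPI=n. */
static void *map_opregion(resource_size_t pa)
{
	return memremap(pa, OPREGION_SIZE, MEMREMAP_WB);
}

static void unmap_opregion(void *va)
{
	if (va)
		memunmap(va);
}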
@@ -118,6 +118,7 @@ static u32 gen9_render_mocs_L3[32];
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	enum forcewake_domains fw;
 	i915_reg_t reg;
 	u32 regs[] = {
 		[RCS] = 0x4260,
@@ -135,11 +136,25 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 
 	reg = _MMIO(regs[ring_id]);
 
-	I915_WRITE(reg, 0x1);
+	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
+	 * we need to put a forcewake when invalidating RCS TLB caches,
+	 * otherwise device can go to RC6 state and interrupt invalidation
+	 * process
+	 */
+	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+					    FW_REG_READ | FW_REG_WRITE);
+	if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+		fw |= FORCEWAKE_RENDER;
 
-	if (wait_for_atomic((I915_READ(reg) == 0), 50))
+	intel_uncore_forcewake_get(dev_priv, fw);
+
+	I915_WRITE_FW(reg, 0x1);
+
+	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
 		gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
 
+	intel_uncore_forcewake_put(dev_priv, fw);
+
 	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
 }
 
@@ -162,6 +177,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 	if (!IS_SKYLAKE(dev_priv))
 		return;
 
+	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
 		gen9_render_mocs[ring_id][i] = I915_READ(offset);
 		I915_WRITE(offset, vgpu_vreg(vgpu, offset));
@@ -199,6 +215,7 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 	if (!IS_SKYLAKE(dev_priv))
 		return;
 
+	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
 		vgpu_vreg(vgpu, offset) = I915_READ(offset);
 		I915_WRITE(offset, gen9_render_mocs[ring_id][i]);
@@ -402,19 +402,24 @@ static int workload_thread(void *priv)
 	struct intel_vgpu_workload *workload = NULL;
 	int ret;
 	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
 
 	kfree(p);
 
 	gvt_dbg_core("workload thread for ring %d started\n", ring_id);
 
 	while (!kthread_should_stop()) {
-		ret = wait_event_interruptible(scheduler->waitq[ring_id],
-				kthread_should_stop() ||
-				(workload = pick_next_workload(gvt, ring_id)));
-
-		WARN_ON_ONCE(ret);
-
-		if (kthread_should_stop())
+		add_wait_queue(&scheduler->waitq[ring_id], &wait);
+		do {
+			workload = pick_next_workload(gvt, ring_id);
+			if (workload)
+				break;
+			wait_woken(&wait, TASK_INTERRUPTIBLE,
+				   MAX_SCHEDULE_TIMEOUT);
+		} while (!kthread_should_stop());
+		remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+
+		if (!workload)
 			break;
 
 		mutex_lock(&scheduler_mutex);
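This is the nested-sleep fix from the pull summary: wait_event_interruptible() evaluates its condition after the task has been set to TASK_INTERRUPTIBLE, and here the condition (pick_next_workload()) takes a mutex, so it can block while the task is already "sleeping", which trips the kernel's nested-sleep warnings. The wait_woken() idiom checks the condition while the task is still TASK_RUNNING and only then sleeps. A generic kernel-side sketch of the pattern; fetch_work(), my_waitq and worker() are placeholders, not driver symbols:

#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);

/* Placeholder for a condition check that may itself sleep (e.g. takes a mutex). */
static void *fetch_work(void)
{
	return NULL;
}

static int worker(void *data)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	void *work;

	while (!kthread_should_stop()) {
		add_wait_queue(&my_waitq, &wait);
		do {
			/* The condition runs while the task is TASK_RUNNING,
			 * so it is allowed to block here. */
			work = fetch_work();
			if (work)
				break;
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
		} while (!kthread_should_stop());
		remove_wait_queue(&my_waitq, &wait);

		if (!work)
			break;

		/* ... process 'work' ... */
	}
	return 0;
}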
@@ -41,7 +41,7 @@ static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
 	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
 }
 
-static int setup_vgpu_mmio(struct intel_vgpu *vgpu)
+int setup_vgpu_mmio(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	const struct intel_gvt_device_info *info = &gvt->device_info;
@@ -103,7 +103,7 @@ static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
 	}
 }
 
-static void populate_pvinfo_page(struct intel_vgpu *vgpu)
+void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
 	/* setup the ballooning information */
 	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;