Merge tag 'drm-intel-next-2017-12-22' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

- Allow internal page allocation to fail (Chris)
- More improvements on logs, dumps, and trace (Chris, Michal)
- Important Coffee Lake fix for stolen memory (Lucas)
- Continue to make GPU reset more robust, as well as improving selftest coverage for it (Chris)
- Unifying debugfs return codes (Michal)
- Using existing helper for testing obj pages (Matthew)
- Organize and improve gem_request tracepoints (Lionel)
- Protect DDI port to DPLL map from theoretical race (Rodrigo)
- ... and consequently fixing the indentation on this DDI clk selection function (Chris)
- ... and consequently properly serializing non-blocking modesets (Ville)
- Add support for horizontal plane flipping on Cannonlake (Joonas)
- Two Cannonlake workarounds for better stability (Rafael)
- Fix mess around PSR registers (DK)
- More Coffee Lake PCI IDs (Rodrigo)
- Remove CSS modifiers on pipe C of Geminilake (Krisman)
- Disable all planes for load detection (Ville)
- Reorg on i915 display headers (Michal)
- Avoid enabling movntdqa optimization on hypervisor guest (Changbin)

GVT:
- more mmio switch optimization (Weinan)
- cleanup i915_reg_t vs. offset usage (Zhenyu)
- move write protect handler out of mmio handler (Zhenyu)

* tag 'drm-intel-next-2017-12-22' of git://anongit.freedesktop.org/drm/drm-intel: (55 commits)
  drm/i915: Update DRIVER_DATE to 20171222
  drm/i915: Show HWSP in intel_engine_dump()
  drm/i915: Assert that the request is on the execution queue before being removed
  drm/i915/execlists: Show preemption progress in GEM_TRACE
  drm/i915: Put all non-blocking modesets onto an ordered wq
  drm/i915: Disable GMBUS clock gating around GMBUS transfers on gen9+
  drm/i915: Clean up the PNV bit banging vs. GMBUS clock gating w/a
  drm/i915: No need to power up PG2 for GMBUS on BXT
  drm/i915: Disable DC states around GMBUS on GLK
  drm/i915: Do not enable movntdqa optimization in hypervisor guest
  drm/i915: Dump device info at once
  drm/i915: Add pretty printer for runtime part of intel_device_info
  drm/i915: Update intel_device_info_runtime_init() parameter
  drm/i915: Move intel_device_info definitions to its own header
  drm/i915: Move opregion definitions to dedicated intel_opregion.h
  drm/i915: Move display related definitions to dedicated header
  drm/i915: Move some utility functions to i915_util.h
  drm/i915/gvt: move write protect handler out of mmio emulation function
  drm/i915/gvt: cleanup usage for typed mmio reg vs. offset
  drm/i915/gvt: Fix pipe A enable as default for vgpu
  ...
commit 350877626f
@@ -528,6 +528,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
 	INTEL_SKL_IDS(&gen9_early_ops),
 	INTEL_BXT_IDS(&gen9_early_ops),
 	INTEL_KBL_IDS(&gen9_early_ops),
+	INTEL_CFL_IDS(&gen9_early_ops),
 	INTEL_GLK_IDS(&gen9_early_ops),
 	INTEL_CNL_IDS(&gen9_early_ops),
 };

@@ -29,7 +29,6 @@ config DRM_I915_DEBUG
 	select SW_SYNC # signaling validation framework (igt/syncobj*)
 	select DRM_I915_SW_FENCE_DEBUG_OBJECTS
 	select DRM_I915_SELFTEST
-	select DRM_I915_TRACE_GEM
 	default n
 	help
 	  Choose this option to turn on extra driver debugging that may affect

@@ -53,6 +52,7 @@ config DRM_I915_DEBUG_GEM
 
 config DRM_I915_TRACE_GEM
 	bool "Insert extra ftrace output from the GEM internals"
+	depends on DRM_I915_DEBUG_GEM
 	select TRACING
 	default n
 	help

@@ -825,6 +825,21 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
 	return 0;
 }
 
+static inline bool is_mocs_mmio(unsigned int offset)
+{
+	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
+		((offset >= 0xb020) && (offset <= 0xb0a0));
+}
+
+static int mocs_cmd_reg_handler(struct parser_exec_state *s,
+				unsigned int offset, unsigned int index)
+{
+	if (!is_mocs_mmio(offset))
+		return -EINVAL;
+	vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
+	return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
 		unsigned int offset, unsigned int index, char *cmd)
 {

@@ -848,6 +863,10 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		return 0;
 	}
 
+	if (is_mocs_mmio(offset) &&
+	    mocs_cmd_reg_handler(s, offset, index))
+		return -EINVAL;
+
 	if (is_force_nonpriv_mmio(offset) &&
 	    force_nonpriv_reg_handler(s, offset, index))
 		return -EPERM;

@@ -1220,13 +1239,13 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
 		return 0;
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
-		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
 				GENMASK(12, 10)) >> 10;
 	} else {
-		stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+		stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
 				GENMASK(15, 6)) >> 6;
-		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
 	}
 
 	if (stride != info->stride_val)

@@ -1245,21 +1264,21 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 	struct intel_vgpu *vgpu = s->vgpu;
 
-	set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
 		      info->surf_val << 12);
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
 			      info->stride_val);
-		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
 			      info->tile_val << 10);
 	} else {
-		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
 			      info->stride_val << 6);
-		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
 			      info->tile_val << 10);
 	}
 
-	vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+	vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
 	intel_vgpu_trigger_virtual_event(vgpu, info->event);
 	return 0;
 }

@@ -59,7 +59,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-	if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+	if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
 		return 0;
 
 	if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))

@@ -74,7 +74,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 	if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
 		return -EINVAL;
 
-	if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+	if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
 		return 1;
 
 	if (edp_pipe_is_enabled(vgpu) &&

@@ -169,103 +169,105 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
 static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+	vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
 			SDE_PORTC_HOTPLUG_CPT |
 			SDE_PORTD_HOTPLUG_CPT);
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+		vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
 				SDE_PORTE_HOTPLUG_SPT);
-		vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
+		vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
 				SKL_FUSE_DOWNLOAD_STATUS |
 				SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
 				SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
 				SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
-		vgpu_vreg(vgpu, LCPLL1_CTL) |=
+		vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
 				LCPLL_PLL_ENABLE |
 				LCPLL_PLL_LOCK;
-		vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+		vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
 
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_B << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) &=
 				~PORT_CLK_SEL_MASK;
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |=
 				PORT_CLK_SEL_LCPLL_810;
 		}
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_C << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) &=
 				~PORT_CLK_SEL_MASK;
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |=
 				PORT_CLK_SEL_LCPLL_810;
 		}
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
-		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
+		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_D << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) &=
 				~PORT_CLK_SEL_MASK;
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |=
 				PORT_CLK_SEL_LCPLL_810;
 		}
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
-		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
+		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
 	}
 
 	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
 			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
 		if (IS_BROADWELL(dev_priv))
-			vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 				GEN8_PORT_DP_A_HOTPLUG;
 		else
-			vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+			vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
 
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
 	}
 
 	/* Clear host CRT status, so guest couldn't detect this host CRT. */
 	if (IS_BROADWELL(dev_priv))
-		vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+		vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+
+	vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 }
 
 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)

@@ -282,7 +284,6 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 		int type, unsigned int resolution)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
 	if (WARN_ON(resolution >= GVT_EDID_NUM))

@@ -308,7 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 	port->type = type;
 
 	emulate_monitor_status_change(vgpu);
-	vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+
 	return 0;
 }

@@ -368,12 +369,12 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
 		if (!pipe_is_enabled(vgpu, pipe))
 			continue;
 
-		vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
 		intel_vgpu_trigger_virtual_event(vgpu, event);
 	}
 
 	if (pipe_is_enabled(vgpu, pipe)) {
-		vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+		vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
 		intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
 	}
 }

@@ -95,9 +95,9 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
 
 static void reset_gmbus_controller(struct intel_vgpu *vgpu)
 {
-	vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+	vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
 	if (!vgpu->display.i2c_edid.edid_available)
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
 	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
 }
 
@@ -123,16 +123,16 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
 	vgpu->display.i2c_edid.state = I2C_GMBUS;
 	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
 
-	vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
-	vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+	vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+	vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
 			!intel_vgpu_port_is_dp(vgpu, port)) {
 		vgpu->display.i2c_edid.port = port;
 		vgpu->display.i2c_edid.edid_available = true;
-		vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
 	} else
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
 	return 0;
 }
 
@@ -159,8 +159,8 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	 * 2) HW_RDY bit asserted
 	 */
 	if (wvalue & GMBUS_SW_CLR_INT) {
-		vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
 	}
 
 	/* For virtualization, we suppose that HW is always ready,

@@ -208,7 +208,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 			 * visible in gmbus interface)
 			 */
 			i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
-			vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+			vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
 		}
 		break;
 	case NIDX_NS_W:

@@ -220,7 +220,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		 * START (-->INDEX) -->DATA
 		 */
 		i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
 		break;
 	default:
 		gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");

@@ -256,7 +256,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 	u32 reg_data = 0;
 
 	/* Data can only be recevied if previous settings correct */
-	if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+	if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
 		if (byte_left <= 0) {
 			memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
 			return 0;

@@ -147,7 +147,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-	u32 stride_reg = vgpu_vreg(vgpu, DSPSTRIDE(pipe)) & stride_mask;
+	u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
 	u32 stride = stride_reg;
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {

@@ -209,7 +209,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	if (pipe >= I915_MAX_PIPES)
 		return -ENODEV;
 
-	val = vgpu_vreg(vgpu, DSPCNTR(pipe));
+	val = vgpu_vreg_t(vgpu, DSPCNTR(pipe));
 	plane->enabled = !!(val & DISPLAY_PLANE_ENABLE);
 	if (!plane->enabled)
 		return -ENODEV;

@@ -244,7 +244,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 
 	plane->hw_format = fmt;
 
-	plane->base = vgpu_vreg(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
+	plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
 	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
 		gvt_vgpu_err("invalid gma address: %lx\n",
 			     (unsigned long)plane->base);

@@ -263,14 +263,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 		(_PRI_PLANE_STRIDE_MASK >> 6) :
 		_PRI_PLANE_STRIDE_MASK, plane->bpp);
 
-	plane->width = (vgpu_vreg(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
+	plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
 		_PIPE_H_SRCSZ_SHIFT;
 	plane->width += 1;
-	plane->height = (vgpu_vreg(vgpu, PIPESRC(pipe)) &
+	plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) &
 			 _PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
 	plane->height += 1;	/* raw height is one minus the real value */
 
-	val = vgpu_vreg(vgpu, DSPTILEOFF(pipe));
+	val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe));
 	plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
 		_PRI_PLANE_X_OFF_SHIFT;
 	plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>

@@ -344,7 +344,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 	if (pipe >= I915_MAX_PIPES)
 		return -ENODEV;
 
-	val = vgpu_vreg(vgpu, CURCNTR(pipe));
+	val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
 	mode = val & CURSOR_MODE;
 	plane->enabled = (mode != CURSOR_MODE_DISABLE);
 	if (!plane->enabled)

@@ -370,7 +370,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 	gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
 		     alpha_plane, alpha_force);
 
-	plane->base = vgpu_vreg(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
+	plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
 	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
 		gvt_vgpu_err("invalid gma address: %lx\n",
 			     (unsigned long)plane->base);

@@ -384,7 +384,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 		return -EINVAL;
 	}
 
-	val = vgpu_vreg(vgpu, CURPOS(pipe));
+	val = vgpu_vreg_t(vgpu, CURPOS(pipe));
 	plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
 	plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
 	plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;

@@ -424,7 +424,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	if (pipe >= I915_MAX_PIPES)
 		return -ENODEV;
 
-	val = vgpu_vreg(vgpu, SPRCTL(pipe));
+	val = vgpu_vreg_t(vgpu, SPRCTL(pipe));
 	plane->enabled = !!(val & SPRITE_ENABLE);
 	if (!plane->enabled)
 		return -ENODEV;

@@ -475,7 +475,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 
 	plane->drm_format = drm_format;
 
-	plane->base = vgpu_vreg(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
+	plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
 	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
 		gvt_vgpu_err("invalid gma address: %lx\n",
 			     (unsigned long)plane->base);

@@ -489,10 +489,10 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 		return -EINVAL;
 	}
 
-	plane->stride = vgpu_vreg(vgpu, SPRSTRIDE(pipe)) &
+	plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) &
 				_SPRITE_STRIDE_MASK;
 
-	val = vgpu_vreg(vgpu, SPRSIZE(pipe));
+	val = vgpu_vreg_t(vgpu, SPRSIZE(pipe));
 	plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
 		_SPRITE_SIZE_HEIGHT_SHIFT;
 	plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>

@@ -500,11 +500,11 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	plane->height += 1;	/* raw height is one minus the real value */
 	plane->width += 1;	/* raw width is one minus the real value */
 
-	val = vgpu_vreg(vgpu, SPRPOS(pipe));
+	val = vgpu_vreg_t(vgpu, SPRPOS(pipe));
 	plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
 	plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
 
-	val = vgpu_vreg(vgpu, SPROFFSET(pipe));
+	val = vgpu_vreg_t(vgpu, SPROFFSET(pipe));
 	plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
 		_SPRITE_OFFSET_START_X_SHIFT;
 	plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>

@@ -1968,6 +1968,39 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	return ret;
 }
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+				void *p_data, unsigned int bytes)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ret = 0;
+
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
+
+		mutex_lock(&gvt->lock);
+
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
+			if (unlikely(vgpu->failsafe)) {
+				/* remove write protection to prevent furture traps */
+				intel_vgpu_clean_page_track(vgpu, t);
+			} else {
+				ret = t->handler(t, pa, p_data, bytes);
+				if (ret) {
+					gvt_err("guest page write error %d, "
+						"gfn 0x%lx, pa 0x%llx, "
+						"var 0x%x, len %d\n",
+						ret, t->gfn, pa,
+						*(u32 *)p_data, bytes);
+				}
+			}
+		}
+		mutex_unlock(&gvt->lock);
+	}
+	return ret;
+}
+
+
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		intel_gvt_gtt_type_t type)
 {

@@ -2244,7 +2277,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
 		int page_table_level)
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+	u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 
 	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))

@@ -2279,7 +2312,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 		int page_table_level)
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+	u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 
 	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))

@@ -308,4 +308,7 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
 int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes);
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+	void *p_data, unsigned int bytes);
+
 #endif /* _GVT_GTT_H_ */

@@ -183,6 +183,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.get_gvt_attrs = intel_get_gvt_attrs,
 	.vgpu_query_plane = intel_vgpu_query_plane,
 	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
+	.write_protect_handler = intel_vgpu_write_protect_handler,
 };
 
 /**

@@ -412,23 +412,20 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 		u32 fence, u64 value);
 
-/* Macros for easily accessing vGPU virtual/shadow register */
-#define vgpu_vreg(vgpu, reg) \
-	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg8(vgpu, reg) \
-	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg16(vgpu, reg) \
-	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg64(vgpu, reg) \
-	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg(vgpu, reg) \
-	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg8(vgpu, reg) \
-	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg16(vgpu, reg) \
-	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg64(vgpu, reg) \
-	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+/* Macros for easily accessing vGPU virtual/shadow register.
+   Explicitly seperate use for typed MMIO reg or real offset.*/
+#define vgpu_vreg_t(vgpu, reg) \
+	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg(vgpu, offset) \
+	(*(u32 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_vreg64_t(vgpu, reg) \
+	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg64(vgpu, offset) \
+	(*(u64 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_sreg_t(vgpu, reg) \
+	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
+#define vgpu_sreg(vgpu, offset) \
+	(*(u32 *)(vgpu->mmio.sreg + (offset)))
 
 #define for_each_active_vgpu(gvt, vgpu, id) \
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \

@@ -549,6 +546,8 @@ struct intel_gvt_ops {
 			struct attribute_group ***intel_vgpu_type_groups);
 	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
 	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
+	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
+				unsigned int);
 };
 

[File diff suppressed because it is too large]

@@ -1360,8 +1360,8 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			struct kvmgt_guest_info, track_node);
 
 	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
-		intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
-					(void *)val, len);
+		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+					(void *)val, len);
 }
 
 static void kvmgt_page_track_flush_slot(struct kvm *kvm,

@@ -117,25 +117,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		else
 			memcpy(pt, p_data, bytes);
 
-	} else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		/* Since we enter the failsafe mode early during guest boot,
-		 * guest may not have chance to set up its ppgtt table, so
-		 * there should not be any wp pages for guest. Keep the wp
-		 * related code here in case we need to handle it in furture.
-		 */
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			/* remove write protection to prevent furture traps */
-			intel_vgpu_clean_page_track(vgpu, t);
-			if (read)
-				intel_gvt_hypervisor_read_gpa(vgpu, pa,
-						p_data, bytes);
-			else
-				intel_gvt_hypervisor_write_gpa(vgpu, pa,
-						p_data, bytes);
-		}
 	}
 	mutex_unlock(&gvt->lock);
 }

@@ -168,23 +149,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		goto out;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
-					p_data, bytes);
-			if (ret) {
-				gvt_vgpu_err("guest page read error %d, "
-					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-					ret, t->gfn, pa, *(u32 *)p_data,
-					bytes);
-			}
-			goto out;
-		}
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))

@@ -263,23 +227,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		goto out;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			ret = t->handler(t, pa, p_data, bytes);
-			if (ret) {
-				gvt_err("guest page write error %d, "
-					"gfn 0x%lx, pa 0x%llx, "
-					"var 0x%x, len %d\n",
-					ret, t->gfn, pa,
-					*(u32 *)p_data, bytes);
-			}
-			goto out;
-		}
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))

@@ -336,10 +283,10 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
 		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
 
-		vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
 
 		/* set the bit 0:2(Core C-State ) to C0 */
-		vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
 
 		vgpu->mmio.disable_warn_untrack = false;
 	} else {

@@ -76,13 +76,6 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
 	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
 	void *data);
 
-
-#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
-	typeof(reg) __reg = reg; \
-	u32 *offset = (u32 *)&__reg; \
-	*offset; \
-})
-
 int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);

@@ -149,8 +149,41 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{ /* Terminated */ }
 };
 
-static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
-static u32 gen9_render_mocs_L3[32];
+static struct {
+	bool initialized;
+	u32 control_table[I915_NUM_ENGINES][64];
+	u32 l3cc_table[32];
+} gen9_render_mocs;
+
+static void load_render_mocs(struct drm_i915_private *dev_priv)
+{
+	i915_reg_t offset;
+	u32 regs[] = {
+		[RCS] = 0xc800,
+		[VCS] = 0xc900,
+		[VCS2] = 0xca00,
+		[BCS] = 0xcc00,
+		[VECS] = 0xcb00,
+	};
+	int ring_id, i;
+
+	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+		offset.reg = regs[ring_id];
+		for (i = 0; i < 64; i++) {
+			gen9_render_mocs.control_table[ring_id][i] =
+				I915_READ_FW(offset);
+			offset.reg += 4;
+		}
+	}
+
+	offset.reg = 0xb020;
+	for (i = 0; i < 32; i++) {
+		gen9_render_mocs.l3cc_table[i] =
+			I915_READ_FW(offset);
+		offset.reg += 4;
+	}
+	gen9_render_mocs.initialized = true;
+}
 
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {

@@ -191,17 +224,20 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
 		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
 	else
-		vgpu_vreg(vgpu, regs[ring_id]) = 0;
+		vgpu_vreg_t(vgpu, reg) = 0;
 
 	intel_uncore_forcewake_put(dev_priv, fw);
 
 	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
 }
 
-static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
+static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
+			int ring_id)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv;
 	i915_reg_t offset, l3_offset;
+	u32 old_v, new_v;
 
 	u32 regs[] = {
 		[RCS] = 0xc800,
 		[VCS] = 0xc900,

@@ -211,54 +247,45 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 	};
 	int i;
 
+	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
+	if (!pre && !gen9_render_mocs.initialized)
+		load_render_mocs(dev_priv);
+
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
-		gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
-		I915_WRITE_FW(offset, vgpu_vreg(vgpu, offset));
+		if (pre)
+			old_v = vgpu_vreg_t(pre, offset);
+		else
+			old_v = gen9_render_mocs.control_table[ring_id][i];
+		if (next)
+			new_v = vgpu_vreg_t(next, offset);
+		else
+			new_v = gen9_render_mocs.control_table[ring_id][i];
+
+		if (old_v != new_v)
+			I915_WRITE_FW(offset, new_v);
+
 		offset.reg += 4;
 	}
 
 	if (ring_id == RCS) {
 		l3_offset.reg = 0xb020;
 		for (i = 0; i < 32; i++) {
-			gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
-			I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
-			l3_offset.reg += 4;
-		}
-	}
-}
-
-static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
-{
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	i915_reg_t offset, l3_offset;
-	u32 regs[] = {
-		[RCS] = 0xc800,
-		[VCS] = 0xc900,
-		[VCS2] = 0xca00,
-		[BCS] = 0xcc00,
-		[VECS] = 0xcb00,
-	};
-	int i;
-
-	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
-		return;
-
-	offset.reg = regs[ring_id];
-	for (i = 0; i < 64; i++) {
-		vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
-		I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
-		offset.reg += 4;
-	}
-
-	if (ring_id == RCS) {
-		l3_offset.reg = 0xb020;
-		for (i = 0; i < 32; i++) {
-			vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
-			I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
+			if (pre)
+				old_v = vgpu_vreg_t(pre, l3_offset);
+			else
+				old_v = gen9_render_mocs.l3cc_table[i];
+			if (next)
+				new_v = vgpu_vreg_t(next, l3_offset);
+			else
+				new_v = gen9_render_mocs.l3cc_table[i];
+
+			if (old_v != new_v)
+				I915_WRITE_FW(l3_offset, new_v);
+
 			l3_offset.reg += 4;
 		}
 	}

@@ -266,84 +293,77 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 
 #define CTX_CONTEXT_CONTROL_VAL	0x03
 
-/* Switch ring mmio values (context) from host to a vgpu. */
-static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
+/* Switch ring mmio values (context). */
+static void switch_mmio(struct intel_vgpu *pre,
+			struct intel_vgpu *next,
+			int ring_id)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	u32 *reg_state = s->shadow_ctx->engine[ring_id].lrc_reg_state;
-	u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+	struct drm_i915_private *dev_priv;
+	struct intel_vgpu_submission *s;
+	u32 *reg_state, ctx_ctrl;
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 	struct engine_mmio *mmio;
-	u32 v;
+	u32 old_v, new_v;
 
+	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		load_mocs(vgpu, ring_id);
+		switch_mocs(pre, next, ring_id);
 
-	mmio = vgpu->gvt->engine_mmio_list;
+	mmio = dev_priv->gvt->engine_mmio_list;
 	while (i915_mmio_reg_offset((mmio++)->reg)) {
 		if (mmio->ring_id != ring_id)
 			continue;
 
-		mmio->value = I915_READ_FW(mmio->reg);
-
-		/*
-		 * if it is an inhibit context, load in_context mmio
-		 * into HW by mmio write. If it is not, skip this mmio
-		 * write.
-		 */
-		if (mmio->in_context &&
-		    (ctx_ctrl & inhibit_mask) != inhibit_mask)
-			continue;
-
-		if (mmio->mask)
-			v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
-		else
-			v = vgpu_vreg(vgpu, mmio->reg);
-
-		I915_WRITE_FW(mmio->reg, v);
-
-		trace_render_mmio(vgpu->id, "load",
-				  i915_mmio_reg_offset(mmio->reg),
-				  mmio->value, v);
-	}
-
-	handle_tlb_pending_event(vgpu, ring_id);
-}
-
-/* Switch ring mmio values (context) from vgpu to host. */
-static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
-{
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct engine_mmio *mmio;
-	u32 v;
-
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		restore_mocs(vgpu, ring_id);
-
-	mmio = vgpu->gvt->engine_mmio_list;
-	while (i915_mmio_reg_offset((mmio++)->reg)) {
-		if (mmio->ring_id != ring_id)
-			continue;
-
-		vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
-
-		if (mmio->mask) {
-			vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
-			v = mmio->value | (mmio->mask << 16);
+		// save
+		if (pre) {
+			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+			if (mmio->mask)
+				vgpu_vreg_t(pre, mmio->reg) &=
+						~(mmio->mask << 16);
+			old_v = vgpu_vreg_t(pre, mmio->reg);
 		} else
-			v = mmio->value;
+			old_v = mmio->value = I915_READ_FW(mmio->reg);
 
-		if (mmio->in_context)
-			continue;
+		// restore
+		if (next) {
+			s = &next->submission;
+			reg_state =
+				s->shadow_ctx->engine[ring_id].lrc_reg_state;
+			ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+			/*
+			 * if it is an inhibit context, load in_context mmio
+			 * into HW by mmio write. If it is not, skip this mmio
+			 * write.
+			 */
+			if (mmio->in_context &&
+			    (ctx_ctrl & inhibit_mask) != inhibit_mask)
+				continue;
 
-		I915_WRITE_FW(mmio->reg, v);
+			if (mmio->mask)
+				new_v = vgpu_vreg_t(next, mmio->reg) |
+							(mmio->mask << 16);
+			else
+				new_v = vgpu_vreg_t(next, mmio->reg);
+		} else {
+			if (mmio->in_context)
+				continue;
+			if (mmio->mask)
+				new_v = mmio->value | (mmio->mask << 16);
+			else
+				new_v = mmio->value;
+		}
 
-		trace_render_mmio(vgpu->id, "restore",
+		I915_WRITE_FW(mmio->reg, new_v);
+
+		trace_render_mmio(pre ? pre->id : 0,
+				  next ? next->id : 0,
+				  "switch",
 				  i915_mmio_reg_offset(mmio->reg),
-				  mmio->value, v);
+				  old_v, new_v);
 	}
 
+	if (next)
+		handle_tlb_pending_event(next, ring_id);
 }
 
 /**

@@ -374,17 +394,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 	 * handle forcewake mannually.
 	 */
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-	/**
-	 * TODO: Optimize for vGPU to vGPU switch by merging
-	 * switch_mmio_to_host() and switch_mmio_to_vgpu().
-	 */
-	if (pre)
-		switch_mmio_to_host(pre, ring_id);
-
-	if (next)
-		switch_mmio_to_vgpu(next, ring_id);
-
+	switch_mmio(pre, next, ring_id);
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 

@@ -330,13 +330,14 @@ TRACE_EVENT(inject_msi,
 );
 
 TRACE_EVENT(render_mmio,
-	TP_PROTO(int id, char *action, unsigned int reg,
+	TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
 		 unsigned int old_val, unsigned int new_val),
 
-	TP_ARGS(id, action, reg, new_val, old_val),
+	TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
 
 	TP_STRUCT__entry(
-		__field(int, id)
+		__field(int, old_id)
+		__field(int, new_id)
 		__array(char, buf, GVT_TEMP_STR_LEN)
 		__field(unsigned int, reg)
 		__field(unsigned int, old_val)

@@ -344,15 +345,17 @@ TRACE_EVENT(render_mmio,
 	),
 
 	TP_fast_assign(
-		__entry->id = id;
+		__entry->old_id = old_id;
+		__entry->new_id = new_id;
 		snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", action);
 		__entry->reg = reg;
 		__entry->old_val = old_val;
 		__entry->new_val = new_val;
 	),
 
-	TP_printk("VM%u %s reg %x, old %08x new %08x\n",
-		  __entry->id, __entry->buf, __entry->reg,
+	TP_printk("VM%u -> VM%u %s reg %x, old %08x new %08x\n",
+		  __entry->old_id, __entry->new_id,
+		  __entry->buf, __entry->reg,
 		  __entry->old_val, __entry->new_val)
 );
 

@@ -38,25 +38,25 @@
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
 	/* setup the ballooning information */
-	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
-	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
-	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
-	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
-	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
+	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
+	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
 
-	vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
-	vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
 
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
 		vgpu_aperture_gmadr_base(vgpu);
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
 		vgpu_aperture_sz(vgpu);
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
 		vgpu_hidden_gmadr_base(vgpu);
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
 		vgpu_hidden_sz(vgpu);
 
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
 
 	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
 	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",

@@ -37,40 +37,21 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
 	return to_i915(node->minor->dev);
 }
 
-static __always_inline void seq_print_param(struct seq_file *m,
-					    const char *name,
-					    const char *type,
-					    const void *x)
-{
-	if (!__builtin_strcmp(type, "bool"))
-		seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
-	else if (!__builtin_strcmp(type, "int"))
-		seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
-	else if (!__builtin_strcmp(type, "unsigned int"))
-		seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
-	else if (!__builtin_strcmp(type, "char *"))
-		seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
-	else
-		BUILD_BUG();
-}
-
 static int i915_capabilities(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	const struct intel_device_info *info = INTEL_INFO(dev_priv);
+	struct drm_printer p = drm_seq_file_printer(m);
 
 	seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
 	seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
 	seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
 
-#define PRINT_FLAG(x)  seq_printf(m, #x ": %s\n", yesno(info->x))
-	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
-#undef PRINT_FLAG
+	intel_device_info_dump_flags(info, &p);
+	intel_device_info_dump_runtime(info, &p);
 
 	kernel_param_lock(THIS_MODULE);
-#define PRINT_PARAM(T, x, ...) seq_print_param(m, #x, #T, &i915_modparams.x);
-	I915_PARAMS_FOR_EACH(PRINT_PARAM);
-#undef PRINT_PARAM
+	i915_params_dump(&i915_modparams, &p);
 	kernel_param_unlock(THIS_MODULE);
 
 	return 0;

@@ -1601,20 +1582,23 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
 static int i915_fbc_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
+	struct intel_fbc *fbc = &dev_priv->fbc;
 
-	if (!HAS_FBC(dev_priv)) {
-		seq_puts(m, "FBC unsupported on this chipset\n");
-		return 0;
-	}
+	if (!HAS_FBC(dev_priv))
+		return -ENODEV;
 
 	intel_runtime_pm_get(dev_priv);
-	mutex_lock(&dev_priv->fbc.lock);
+	mutex_lock(&fbc->lock);
 
 	if (intel_fbc_is_active(dev_priv))
 		seq_puts(m, "FBC enabled\n");
 	else
-		seq_printf(m, "FBC disabled: %s\n",
-			   dev_priv->fbc.no_fbc_reason);
+		seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+
+	if (fbc->work.scheduled)
+		seq_printf(m, "FBC worker scheduled on vblank %u, now %llu\n",
+			   fbc->work.scheduled_vblank,
+			   drm_crtc_vblank_count(&fbc->crtc->base));
 
 	if (intel_fbc_is_active(dev_priv)) {
 		u32 mask;

@@ -1634,7 +1618,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
 		seq_printf(m, "Compressing: %s\n", yesno(mask));
 	}
 
-	mutex_unlock(&dev_priv->fbc.lock);
+	mutex_unlock(&fbc->lock);
 	intel_runtime_pm_put(dev_priv);
 
 	return 0;

@@ -1681,10 +1665,8 @@ static int i915_ips_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 
-	if (!HAS_IPS(dev_priv)) {
-		seq_puts(m, "not supported\n");
-		return 0;
-	}
+	if (!HAS_IPS(dev_priv))
+		return -ENODEV;
 
 	intel_runtime_pm_get(dev_priv);
 

@@ -1770,10 +1752,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
 	int gpu_freq, ia_freq;
 	unsigned int max_gpu_freq, min_gpu_freq;
 
-	if (!HAS_LLC(dev_priv)) {
-		seq_puts(m, "unsupported on this chipset\n");
-		return 0;
-	}
+	if (!HAS_LLC(dev_priv))
+		return -ENODEV;
 
 	intel_runtime_pm_get(dev_priv);
 

@@ -2253,8 +2233,8 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_printer p;
 
-	if (!HAS_HUC_UCODE(dev_priv))
-		return 0;
+	if (!HAS_HUC(dev_priv))
+		return -ENODEV;
 
 	p = drm_seq_file_printer(m);
 	intel_uc_fw_dump(&dev_priv->huc.fw, &p);

@@ -2272,8 +2252,8 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
 	struct drm_printer p;
 	u32 tmp, i;
 
-	if (!HAS_GUC_UCODE(dev_priv))
-		return 0;
+	if (!HAS_GUC(dev_priv))
+		return -ENODEV;
 
 	p = drm_seq_file_printer(m);
 	intel_uc_fw_dump(&dev_priv->guc.fw, &p);

@@ -2346,29 +2326,16 @@ static void i915_guc_client_info(struct seq_file *m,
 	seq_printf(m, "\tTotal: %llu\n", tot);
 }
 
-static bool check_guc_submission(struct seq_file *m)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	const struct intel_guc *guc = &dev_priv->guc;
-
-	if (!guc->execbuf_client) {
-		seq_printf(m, "GuC submission %s\n",
-			   HAS_GUC_SCHED(dev_priv) ?
-			   "disabled" :
-			   "not supported");
-		return false;
-	}
-
-	return true;
-}
-
 static int i915_guc_info(struct seq_file *m, void *data)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	const struct intel_guc *guc = &dev_priv->guc;
 
-	if (!check_guc_submission(m))
-		return 0;
+	if (!USES_GUC_SUBMISSION(dev_priv))
+		return -ENODEV;
+
+	GEM_BUG_ON(!guc->execbuf_client);
+	GEM_BUG_ON(!guc->preempt_client);
 
 	seq_printf(m, "Doorbell map:\n");
 	seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);

@@ -2395,8 +2362,8 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
 	unsigned int tmp;
 	int index;
 
-	if (!check_guc_submission(m))
-		return 0;
+	if (!USES_GUC_SUBMISSION(dev_priv))
+		return -ENODEV;
 
 	for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
 		struct intel_engine_cs *engine;

@@ -2449,6 +2416,9 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
 	u32 *log;
 	int i = 0;
 
+	if (!HAS_GUC(dev_priv))
+		return -ENODEV;
+
 	if (dump_load_err)
 		obj = dev_priv->guc.load_err_log;
 	else if (dev_priv->guc.log.vma)

@@ -2480,6 +2450,9 @@ static int i915_guc_log_control_get(void *data, u64 *val)
 {
 	struct drm_i915_private *dev_priv = data;
 
+	if (!HAS_GUC(dev_priv))
+		return -ENODEV;
+
 	if (!dev_priv->guc.log.vma)
 		return -EINVAL;
 

@@ -2493,6 +2466,9 @@ static int i915_guc_log_control_set(void *data, u64 val)
 	struct drm_i915_private *dev_priv = data;
 	int ret;
 
+	if (!HAS_GUC(dev_priv))
+		return -ENODEV;
+
 	if (!dev_priv->guc.log.vma)
 		return -EINVAL;
 

@@ -2543,10 +2519,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
 	enum pipe pipe;
 	bool enabled = false;
 
-	if (!HAS_PSR(dev_priv)) {
-		seq_puts(m, "PSR not supported\n");
-		return 0;
-	}
+	if (!HAS_PSR(dev_priv))
+		return -ENODEV;
 
 	intel_runtime_pm_get(dev_priv);
 

@@ -2785,10 +2759,8 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct intel_csr *csr;
 
-	if (!HAS_CSR(dev_priv)) {
-		seq_puts(m, "not supported\n");
-		return 0;
-	}
+	if (!HAS_CSR(dev_priv))
+		return -ENODEV;
 
 	csr = &dev_priv->csr;
 

@@ -3324,7 +3296,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
 	int plane;
 
 	if (INTEL_GEN(dev_priv) < 9)
-		return 0;
+		return -ENODEV;
 
 	drm_modeset_lock_all(dev);
 

@@ -931,8 +931,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
	intel_display_crc_init(dev_priv);

	intel_device_info_dump(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

@@ -1084,7 +1082,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
	if (i915_inject_load_failure())
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);
	intel_device_info_runtime_init(mkwrite_device_info(dev_priv));

	intel_sanitize_options(dev_priv);

@@ -1294,6 +1292,21 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
	i915_gem_shrinker_unregister(dev_priv);
}

static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug & DRM_UT_DRIVER) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		intel_device_info_dump(&dev_priv->info, &p);
		intel_device_info_dump_runtime(&dev_priv->info, &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @pdev: PCI device

@@ -1379,13 +1392,10 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
	intel_init_ipc(dev_priv);

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");

	intel_runtime_pm_put(dev_priv);

	i915_welcome_messages(dev_priv);

	return 0;

out_cleanup_hw:

@@ -1924,9 +1934,6 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
		goto taint;
	}

	i915_gem_reset(i915);
	intel_overlay_reset(i915);

	/* Ok, now get things going again... */

	/*

@@ -1939,6 +1946,9 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
		goto error;
	}

	i915_gem_reset(i915);
	intel_overlay_reset(i915);

	/*
	 * Next we need to restore the context, but we don't use those
	 * yet either...

@@ -2011,19 +2021,19 @@ int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));

	active_request = i915_gem_reset_prepare_engine(engine);
	if (IS_ERR_OR_NULL(active_request)) {
		/* Either the previous reset failed, or we pardon the reset. */
		ret = PTR_ERR(active_request);
		goto out;
	}

	if (!(flags & I915_RESET_QUIET)) {
		dev_notice(engine->i915->drm.dev,
			   "Resetting %s after gpu hang\n", engine->name);
	}
	error->reset_engine_count[engine->id]++;

	active_request = i915_gem_reset_prepare_engine(engine);
	if (IS_ERR(active_request)) {
		DRM_DEBUG_DRIVER("Previous reset failed, promote to full reset\n");
		ret = PTR_ERR(active_request);
		goto out;
	}

	if (!engine->i915->guc.execbuf_client)
		ret = intel_gt_reset_engine(engine->i915, engine);
	else
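One detail worth calling out in the i915_reset_engine() hunk above: switching from IS_ERR() to IS_ERR_OR_NULL() lets i915_gem_reset_prepare_engine() report three outcomes through a single pointer, and because PTR_ERR(NULL) is 0, the pardoned-reset path falls out as success. A userspace sketch of that encoding (the outcome values are illustrative):

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define PTR_ERR(ptr)	((long)(ptr))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define IS_ERR_OR_NULL(ptr) (!(ptr) || IS_ERR(ptr))

static void *prepare_engine(int outcome)
{
	static int request;

	if (outcome < 0)
		return ERR_PTR(outcome); /* previous reset failed */
	if (outcome == 0)
		return NULL;             /* engine idle: pardon the reset */
	return &request;                 /* active request to replay */
}

int main(void)
{
	void *rq = prepare_engine(0);

	if (IS_ERR_OR_NULL(rq))
		printf("skip reset, ret=%ld\n", PTR_ERR(rq)); /* 0 for NULL */
	else
		printf("reset with active request\n");
	return 0;
}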
@@ -56,12 +56,15 @@
#include "i915_reg.h"
#include "i915_utils.h"

#include "intel_uncore.h"
#include "intel_bios.h"
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
#include "intel_uc.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_uc.h"

#include "i915_gem.h"
#include "i915_gem_context.h"

@@ -80,8 +83,8 @@
#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20171214"
#define DRIVER_TIMESTAMP	1513282202
#define DRIVER_DATE		"20171222"
#define DRIVER_TIMESTAMP	1513971710

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions

@@ -243,174 +246,6 @@ static inline uint_fixed_16_16_t add_fixed16_u32(uint_fixed_16_16_t add1,
	return clamp_u64_to_fixed16(interm_sum);
}

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

static inline const char *enableddisabled(bool v)
{
	return v ? "enabled" : "disabled";
}

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	TRANSCODER_DSI_A,
	TRANSCODER_DSI_C,
	I915_MAX_TRANSCODERS
};

static inline const char *transcoder_name(enum transcoder transcoder)
{
	switch (transcoder) {
	case TRANSCODER_A:
		return "A";
	case TRANSCODER_B:
		return "B";
	case TRANSCODER_C:
		return "C";
	case TRANSCODER_EDP:
		return "EDP";
	case TRANSCODER_DSI_A:
		return "DSI A";
	case TRANSCODER_DSI_C:
		return "DSI C";
	default:
		return "<invalid>";
	}
}

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}

/*
 * Global legacy plane identifier. Valid only for primary/sprite
 * planes on pre-g4x, and only for primary planes on g4x-bdw.
 */
enum i9xx_plane_id {
	PLANE_A,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')

/*
 * Per-pipe plane identifier.
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
 * number of planes per CRTC. Not all platforms really have this many planes,
 * which means some arrays of size I915_MAX_PLANES may have unused entries
 * between the topmost sprite plane and the cursor plane.
 *
 * This is expected to be passed to various register macros
 * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
 */
enum plane_id {
	PLANE_PRIMARY,
	PLANE_SPRITE0,
	PLANE_SPRITE1,
	PLANE_SPRITE2,
	PLANE_CURSOR,
	I915_MAX_PLANES,
};

#define for_each_plane_id_on_crtc(__crtc, __p) \
	for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
		for_each_if ((__crtc)->plane_ids_mask & BIT(__p))

enum port {
	PORT_NONE = -1,
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1,
	DPIO_PHY2,
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_TRANSCODER_DSI_A,
	POWER_DOMAIN_TRANSCODER_DSI_C,
	POWER_DOMAIN_PORT_DDI_A_LANES,
	POWER_DOMAIN_PORT_DDI_B_LANES,
	POWER_DOMAIN_PORT_DDI_C_LANES,
	POWER_DOMAIN_PORT_DDI_D_LANES,
	POWER_DOMAIN_PORT_DDI_E_LANES,
	POWER_DOMAIN_PORT_DDI_A_IO,
	POWER_DOMAIN_PORT_DDI_B_IO,
	POWER_DOMAIN_PORT_DDI_C_IO,
	POWER_DOMAIN_PORT_DDI_D_IO,
	POWER_DOMAIN_PORT_DDI_E_IO,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_GMBUS,
	POWER_DOMAIN_MODESET,
	POWER_DOMAIN_GT_IRQ,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */

@@ -472,121 +307,6 @@ struct i915_hotplug {
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
		for_each_if ((__mask) & (1 << (__p)))
#define for_each_universal_plane(__dev_priv, __pipe, __p) \
	for ((__p) = 0; \
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s) \
	for ((__s) = 0; \
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
	     (__s)++)

#define for_each_port_masked(__port, __ports_mask) \
	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
		for_each_if ((__ports_mask) & (1 << (__port)))

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)

#define for_each_intel_plane(dev, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head)

#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((plane_mask) & \
			     (1 << drm_plane_index(&intel_plane->base)))

#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
	list_for_each_entry(intel_plane, \
			    &(dev)->mode_config.plane_list, \
			    base.head) \
		for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, \
			    &(dev)->mode_config.crtc_list, \
			    base.head)

#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
	list_for_each_entry(intel_crtc, \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))

#define for_each_intel_encoder(dev, intel_encoder) \
	list_for_each_entry(intel_encoder, \
			    &(dev)->mode_config.encoder_list, \
			    base.head)

#define for_each_intel_connector_iter(intel_connector, iter) \
	while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		for_each_if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		for_each_if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask) \
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
		for_each_if (BIT_ULL(domain) & (mask))

#define for_each_power_well(__dev_priv, __power_well) \
	for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
	     (__power_well) - (__dev_priv)->power_domains.power_wells < \
		(__dev_priv)->power_domains.power_well_count; \
	     (__power_well)++)

#define for_each_power_well_rev(__dev_priv, __power_well) \
	for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
			      (__dev_priv)->power_domains.power_well_count - 1; \
	     (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
	     (__power_well)--)

#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
	for_each_power_well(__dev_priv, __power_well) \
		for_each_if ((__power_well)->domains & (__domain_mask))

#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
	for_each_power_well_rev(__dev_priv, __power_well) \
		for_each_if ((__power_well)->domains & (__domain_mask))

#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
	for ((__i) = 0; \
	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
		     ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
		      (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
	     (__i)++) \
		for_each_if (plane)

#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
	for ((__i) = 0; \
	     (__i) < (__state)->base.dev->mode_config.num_crtc && \
		     ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
		      (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
	     (__i)++) \
		for_each_if (crtc)

#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
	for ((__i) = 0; \
	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
		     ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
		      (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
		      (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
	     (__i)++) \
		for_each_if (plane)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

@@ -623,20 +343,6 @@ struct drm_i915_file_private {
	atomic_t context_bans;
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n,
			    bool reduce_m_n);

/* Interface history:
 *
 * 1.1: Original.

@@ -651,27 +357,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header *header;
	struct opregion_acpi *acpi;
	struct opregion_swsci *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle *asle;
	void *rvda;
	void *vbt_firmware;
	const void *vbt;
	u32 vbt_size;
	u32 *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

@@ -764,137 +449,6 @@ struct intel_csr {
	uint32_t allowed_dc_mask;
};

#define DEV_INFO_FOR_EACH_FLAG(func) \
	func(is_mobile); \
	func(is_lp); \
	func(is_alpha_support); \
	/* Keep has_* in alphabetical order */ \
	func(has_64bit_reloc); \
	func(has_aliasing_ppgtt); \
	func(has_csr); \
	func(has_ddi); \
	func(has_dp_mst); \
	func(has_reset_engine); \
	func(has_fbc); \
	func(has_fpga_dbg); \
	func(has_full_ppgtt); \
	func(has_full_48bit_ppgtt); \
	func(has_gmch_display); \
	func(has_guc); \
	func(has_guc_ct); \
	func(has_hotplug); \
	func(has_l3_dpf); \
	func(has_llc); \
	func(has_logical_ring_contexts); \
	func(has_logical_ring_preemption); \
	func(has_overlay); \
	func(has_pooled_eu); \
	func(has_psr); \
	func(has_rc6); \
	func(has_rc6p); \
	func(has_resource_streamer); \
	func(has_runtime_pm); \
	func(has_snoop); \
	func(unfenced_needs_alignment); \
	func(cursor_needs_physical); \
	func(hws_needs_physical); \
	func(overlay_needs_physical); \
	func(supports_tv); \
	func(has_ipc);

struct sseu_dev_info {
	u8 slice_mask;
	u8 subslice_mask;
	u8 eu_total;
	u8 eu_per_subslice;
	u8 min_eu_in_pool;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
{
	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}

/* Keep in gen based order, and chronological order within a gen */
enum intel_platform {
	INTEL_PLATFORM_UNINITIALIZED = 0,
	INTEL_I830,
	INTEL_I845G,
	INTEL_I85X,
	INTEL_I865G,
	INTEL_I915G,
	INTEL_I915GM,
	INTEL_I945G,
	INTEL_I945GM,
	INTEL_G33,
	INTEL_PINEVIEW,
	INTEL_I965G,
	INTEL_I965GM,
	INTEL_G45,
	INTEL_GM45,
	INTEL_IRONLAKE,
	INTEL_SANDYBRIDGE,
	INTEL_IVYBRIDGE,
	INTEL_VALLEYVIEW,
	INTEL_HASWELL,
	INTEL_BROADWELL,
	INTEL_CHERRYVIEW,
	INTEL_SKYLAKE,
	INTEL_BROXTON,
	INTEL_KABYLAKE,
	INTEL_GEMINILAKE,
	INTEL_COFFEELAKE,
	INTEL_CANNONLAKE,
	INTEL_MAX_PLATFORMS
};

struct intel_device_info {
	u16 device_id;
	u16 gen_mask;

	u8 gen;
	u8 gt; /* GT number, 0 if undefined */
	u8 num_rings;
	u8 ring_mask; /* Rings supported by the HW */

	enum intel_platform platform;
	u32 platform_mask;

	u32 display_mmio_offset;

	u8 num_pipes;
	u8 num_sprites[I915_MAX_PIPES];
	u8 num_scalers[I915_MAX_PIPES];

	unsigned int page_sizes; /* page sizes supported by the HW */

#define DEFINE_FLAG(name) u8 name:1
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
	u16 ddb_size; /* in blocks */

	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	struct sseu_dev_info sseu;

	u32 cs_timestamp_frequency_khz;

	struct color_luts {
		u16 degamma_lut_size;
		u16 gamma_lut_size;
	} color;
};

struct intel_display_error_state;

struct i915_gpu_state {

@@ -948,6 +502,7 @@ struct i915_gpu_state {
	struct drm_i915_error_engine {
		int engine_id;
		/* Software tracked state */
		bool idle;
		bool waiting;
		int num_waiters;
		unsigned long hangcheck_timestamp;

@@ -2405,6 +1960,9 @@ struct drm_i915_private {
	 */
	struct workqueue_struct *wq;

	/* ordered wq for modesets */
	struct workqueue_struct *modeset_wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

@@ -4111,41 +3669,6 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
				  enum port port);

/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
extern void intel_opregion_register(struct drm_i915_private *dev_priv);
extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
					  pci_power_t state);
extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
#else
static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
{
}
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
{
	return 0;
}
static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
{
	return -ENODEV;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);

@@ -4162,10 +3685,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv)
	return (struct intel_device_info *)&dev_priv->info;
}

const char *intel_platform_name(enum intel_platform platform);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump(struct drm_i915_private *dev_priv);

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
@@ -2596,7 +2596,7 @@ static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

@@ -3089,7 +3089,12 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
void i915_gem_reset_engine(struct intel_engine_cs *engine,
			   struct drm_i915_gem_request *request)
{
	engine->irq_posted = 0;
	/*
	 * Make sure this write is visible before we re-enable the interrupt
	 * handlers on another CPU, as tasklet_enable() resolves to just
	 * a compiler barrier which is insufficient for our purpose here.
	 */
	smp_store_mb(engine->irq_posted, 0);

	if (request)
		request = i915_gem_reset_request(engine, request);
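The smp_store_mb() change above is the interesting part of this hunk: tasklet_enable() is only a compiler barrier, so the cleared flag needs a real memory barrier before the handlers can run again on another CPU. A userspace analogue using C11 atomics (a sketch, not the kernel primitive itself):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int irq_posted;

static void reset_engine(void)
{
	/* A plain store plus a compiler barrier would not order this
	 * against another thread observing the re-enabled handler; a
	 * sequentially-consistent store (full barrier) does, which is
	 * what smp_store_mb() provides in the kernel. */
	atomic_store_explicit(&irq_posted, 0, memory_order_seq_cst);
}

int main(void)
{
	atomic_store(&irq_posted, 1);
	reset_engine();
	printf("irq_posted = %u\n", atomic_load(&irq_posted));
	return 0;
}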
@@ -3119,6 +3124,25 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
		ctx = fetch_and_zero(&engine->last_retired_context);
		if (ctx)
			engine->context_unpin(engine, ctx);

		/*
		 * Ostensibly, we always want a context loaded for powersaving,
		 * so if the engine is idle after the reset, send a request
		 * to load our scratch kernel_context.
		 *
		 * More mysteriously, if we leave the engine idle after a reset,
		 * the next userspace batch may hang, with what appears to be
		 * an incoherent read by the CS (presumably stale TLB). An
		 * empty request appears sufficient to paper over the glitch.
		 */
		if (list_empty(&engine->timeline->requests)) {
			struct drm_i915_gem_request *rq;

			rq = i915_gem_request_alloc(engine,
						    dev_priv->kernel_context);
			if (!IS_ERR(rq))
				__i915_add_request(rq, false);
		}
	}

	i915_gem_restore_fences(dev_priv);
@@ -27,6 +27,7 @@
#include "i915_drv.h"

#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)

@@ -95,7 +96,8 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : 0), order);
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
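The MAYFAIL hunk above implements the "allow internal page allocation to fail" item from the changelog: large orders stay quiet and best-effort, and only the final order-0 attempt may retry harder yet still return failure instead of triggering the OOM killer. A userspace sketch of the same fall-back loop (4096 stands in for PAGE_SIZE):

#include <stdio.h>
#include <stdlib.h>

static void *alloc_chunk(unsigned int max_order, unsigned int *got_order)
{
	unsigned int order = max_order;

	do {
		/* try the largest chunk first, halving on failure */
		void *p = malloc(4096u << order);
		if (p) {
			*got_order = order;
			return p;
		}
	} while (order--);

	return NULL; /* even a single page failed: propagate -ENOMEM */
}

int main(void)
{
	unsigned int order;
	void *p = alloc_chunk(4, &order);

	if (p) {
		printf("allocated order-%u chunk\n", order);
		free(p);
	}
	return 0;
}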
@@ -479,6 +479,7 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
	/* Transfer from per-context onto the global per-engine timeline */
	timeline = engine->timeline;
	GEM_BUG_ON(timeline == request->timeline);
	GEM_BUG_ON(request->global_seqno);

	seqno = timeline_get_seqno(timeline);
	GEM_BUG_ON(!seqno);

@@ -525,6 +526,7 @@ void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
	/* Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
	GEM_BUG_ON(!request->global_seqno);
	GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
	engine->timeline->seqno--;
@@ -416,6 +416,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
	int n;

	err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
	err_printf(m, " IDLE?: %s\n", yesno(ee->idle));
	err_printf(m, " START: 0x%08x\n", ee->start);
	err_printf(m, " HEAD: 0x%08x [0x%08x]\n", ee->head, ee->rq_head);
	err_printf(m, " TAIL: 0x%08x [0x%08x, 0x%08x]\n",

@@ -564,34 +565,17 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
static void err_print_capabilities(struct drm_i915_error_state_buf *m,
				   const struct intel_device_info *info)
{
#define PRINT_FLAG(x) err_printf(m, #x ": %s\n", yesno(info->x))
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}
	struct drm_printer p = i915_error_printer(m);

static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
					    const char *name,
					    const char *type,
					    const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		err_printf(m, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		err_printf(m, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
	intel_device_info_dump_flags(info, &p);
}

static void err_print_params(struct drm_i915_error_state_buf *m,
			     const struct i915_params *p)
			     const struct i915_params *params)
{
#define PRINT(T, x, ...) err_print_param(m, #x, #T, &p->x);
	I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
	struct drm_printer p = i915_error_printer(m);

	i915_params_dump(params, &p);
}

static void err_print_pciid(struct drm_i915_error_state_buf *m,

@@ -1256,6 +1240,7 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
		ee->hws = I915_READ(mmio);
	}

	ee->idle = intel_engine_is_idle(engine);
	ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
	ee->hangcheck_action = engine->hangcheck.action;
	ee->hangcheck_stalled = engine->hangcheck.stalled;
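Both error-state hunks above replace open-coded printing with a drm_printer handed to shared dump helpers, so the same dump code can target a seq_file, the debug log, or the error buffer. A self-contained sketch of that indirection (the names here are illustrative, not the DRM API):

#include <stdarg.h>
#include <stdio.h>

struct printer {
	void (*printfn)(struct printer *p, const char *fmt, va_list ap);
	void *arg;	/* sink-specific state, e.g. a FILE */
};

static void pr_printf(struct printer *p, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	p->printfn(p, fmt, ap);
	va_end(ap);
}

static void file_printfn(struct printer *p, const char *fmt, va_list ap)
{
	vfprintf(p->arg, fmt, ap);
}

/* The dump routine is written once against struct printer... */
static void dump_caps(struct printer *p, int has_llc, int has_guc)
{
	pr_printf(p, "has_llc: %s\n", has_llc ? "yes" : "no");
	pr_printf(p, "has_guc: %s\n", has_guc ? "yes" : "no");
}

int main(void)
{
	struct printer p = { .printfn = file_printfn, .arg = stdout };

	/* ...and reused by every consumer that supplies a sink. */
	dump_caps(&p, 1, 0);
	return 0;
}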
@@ -96,6 +96,11 @@ bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len)

void i915_memcpy_init_early(struct drm_i915_private *dev_priv)
{
	if (static_cpu_has(X86_FEATURE_XMM4_1))
	/*
	 * Some hypervisors (e.g. KVM) don't support VEX-prefix instructions
	 * emulation. So don't enable movntdqa in hypervisor guest.
	 */
	if (static_cpu_has(X86_FEATURE_XMM4_1) &&
	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_enable(&has_movntdqa);
}
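The gating logic above can be mimicked in userspace: SSE4.1 support alone is not enough, because some hypervisors do not emulate the VEX-prefixed movntdqa path, so the CPUID hypervisor-present bit vetoes the fast path. A sketch for GCC/Clang on x86:

#include <cpuid.h>
#include <stdbool.h>
#include <stdio.h>

static bool use_fast_copy;

static bool running_in_hypervisor(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;
	return ecx & (1u << 31); /* CPUID.1:ECX hypervisor-present bit */
}

static void fast_copy_init(void)
{
	/* enable only when the feature exists AND we are on bare metal */
	use_fast_copy = __builtin_cpu_supports("sse4.1") &&
			!running_in_hypervisor();
}

int main(void)
{
	fast_copy_init();
	printf("fast copy path: %s\n", use_fast_copy ? "enabled" : "disabled");
	return 0;
}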
@@ -22,6 +22,8 @@
 * IN THE SOFTWARE.
 */

#include <drm/drm_print.h>

#include "i915_params.h"
#include "i915_drv.h"

@@ -172,3 +174,34 @@ i915_param_named(enable_dpcd_backlight, bool, 0600,

i915_param_named(enable_gvt, bool, 0400,
	"Enable support for Intel GVT-g graphics virtualization host support(default:false)");

static __always_inline void _print_param(struct drm_printer *p,
					 const char *name,
					 const char *type,
					 const void *x)
{
	if (!__builtin_strcmp(type, "bool"))
		drm_printf(p, "i915.%s=%s\n", name, yesno(*(const bool *)x));
	else if (!__builtin_strcmp(type, "int"))
		drm_printf(p, "i915.%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		drm_printf(p, "i915.%s=%s\n", name, *(const char **)x);
	else
		BUILD_BUG();
}

/**
 * i915_params_dump - dump i915 modparams
 * @params: i915 modparams
 * @p: the &drm_printer
 *
 * Pretty printer for i915 modparams.
 */
void i915_params_dump(const struct i915_params *params, struct drm_printer *p)
{
#define PRINT(T, x, ...) _print_param(p, #x, #T, &params->x);
	I915_PARAMS_FOR_EACH(PRINT);
#undef PRINT
}
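The _print_param() helper above leans on a preprocessor trick worth spelling out: the X-macro stringifies each parameter's type (#T), and since __builtin_strcmp() against a string literal folds at compile time, each expansion collapses to the one matching drm_printf(). A compile-and-run sketch of the same pattern (the parameter names are made up):

#include <stdio.h>

/* X-macro listing (type, name, default) triples, mirroring
 * I915_PARAMS_FOR_EACH */
#define PARAMS_FOR_EACH(F) \
	F(int, width, 640) \
	F(unsigned int, depth, 24u) \
	F(char *, name, "demo")

static void print_param(const char *name, const char *type, const void *x)
{
	if (!__builtin_strcmp(type, "int"))
		printf("%s=%d\n", name, *(const int *)x);
	else if (!__builtin_strcmp(type, "unsigned int"))
		printf("%s=%u\n", name, *(const unsigned int *)x);
	else if (!__builtin_strcmp(type, "char *"))
		printf("%s=%s\n", name, *(const char *const *)x);
}

int main(void)
{
	/* instantiate the parameters... */
#define DECL(T, x, v) T x = v;
	PARAMS_FOR_EACH(DECL)
#undef DECL
	/* ...then dump them all through the shared printer */
#define PRINT(T, x, v) print_param(#x, #T, &x);
	PARAMS_FOR_EACH(PRINT)
#undef PRINT
	return 0;
}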
@@ -28,6 +28,8 @@
#include <linux/bitops.h>
#include <linux/cache.h> /* for __read_mostly */

struct drm_printer;

#define ENABLE_GUC_SUBMISSION		BIT(0)
#define ENABLE_GUC_LOAD_HUC		BIT(1)

@@ -77,5 +79,7 @@ struct i915_params {

extern struct i915_params i915_modparams __read_mostly;

void i915_params_dump(const struct i915_params *params, struct drm_printer *p);

#endif
@@ -633,6 +633,8 @@ static const struct pci_device_id pciidlist[] = {
	INTEL_CFL_S_GT1_IDS(&intel_coffeelake_gt1_info),
	INTEL_CFL_S_GT2_IDS(&intel_coffeelake_gt2_info),
	INTEL_CFL_H_GT2_IDS(&intel_coffeelake_gt2_info),
	INTEL_CFL_U_GT1_IDS(&intel_coffeelake_gt1_info),
	INTEL_CFL_U_GT2_IDS(&intel_coffeelake_gt2_info),
	INTEL_CFL_U_GT3_IDS(&intel_coffeelake_gt3_info),
	INTEL_CNL_U_GT2_IDS(&intel_cannonlake_gt2_info),
	INTEL_CNL_Y_GT2_IDS(&intel_cannonlake_gt2_info),
@@ -3278,6 +3278,7 @@ enum i915_power_well_id {
# define AUDUNIT_CLOCK_GATE_DISABLE		(1 << 26) /* 965 */
# define DPUNIT_A_CLOCK_GATE_DISABLE		(1 << 25) /* 965 */
# define DPCUNIT_CLOCK_GATE_DISABLE		(1 << 24) /* 965 */
# define PNV_GMBUSUNIT_CLOCK_GATE_DISABLE	(1 << 24) /* pnv */
# define TVRUNIT_CLOCK_GATE_DISABLE		(1 << 23) /* 915-945 */
# define TVCUNIT_CLOCK_GATE_DISABLE		(1 << 22) /* 915-945 */
# define TVFUNIT_CLOCK_GATE_DISABLE		(1 << 21) /* 915-945 */

@@ -3858,6 +3859,9 @@ enum {
#define   PWM2_GATING_DIS		(1 << 14)
#define   PWM1_GATING_DIS		(1 << 13)

#define GEN9_CLKGATE_DIS_4		_MMIO(0x4653C)
#define   BXT_GMBUS_GATING_DIS		(1 << 14)

#define _CLKGATE_DIS_PSL_A		0x46520
#define _CLKGATE_DIS_PSL_B		0x46524
#define _CLKGATE_DIS_PSL_C		0x46528

@@ -3875,6 +3879,9 @@ enum {
#define  SARBUNIT_CLKGATE_DIS		(1 << 5)
#define  RCCUNIT_CLKGATE_DIS		(1 << 7)

#define UNSLICE_UNIT_LEVEL_CLKGATE	_MMIO(0x9434)
#define  VFUNIT_CLKGATE_DIS		(1 << 20)

/*
 * Display engine regs
 */

@@ -6329,6 +6336,7 @@ enum {
#define   PLANE_CTL_TILED_X			(1 << 10)
#define   PLANE_CTL_TILED_Y			(4 << 10)
#define   PLANE_CTL_TILED_YF			(5 << 10)
#define   PLANE_CTL_FLIP_HORIZONTAL		(1 << 8)
#define   PLANE_CTL_ALPHA_MASK			(0x3 << 4) /* Pre-GLK */
#define   PLANE_CTL_ALPHA_DISABLE		(0 << 4)
#define   PLANE_CTL_ALPHA_SW_PREMULTIPLY	(2 << 4)

@@ -7552,6 +7560,7 @@ enum {
#define FDI_RX_CHICKEN(pipe)	_MMIO_PIPE(pipe, _FDI_RXA_CHICKEN, _FDI_RXB_CHICKEN)

#define SOUTH_DSPCLK_GATE_D	_MMIO(0xc2020)
#define  PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1<<31)
#define  PCH_DPLUNIT_CLOCK_GATE_DISABLE (1<<30)
#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define  PCH_CPUNIT_CLOCK_GATE_DISABLE (1<<14)

@@ -8142,6 +8151,7 @@ enum {
#define   PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE	(1<<8)
#define   STALL_DOP_GATING_DISABLE		(1<<5)
#define   THROTTLE_12_5				(7<<2)
#define   DISABLE_EARLY_EOT			(1<<1)

#define GEN7_ROW_CHICKEN2		_MMIO(0xe4f4)
#define GEN7_ROW_CHICKEN2_GT2		_MMIO(0xf4f4)
@@ -616,6 +616,7 @@ TRACE_EVENT(i915_gem_request_queue,

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, hw_id)
			     __field(u32, ring)
			     __field(u32, ctx)
			     __field(u32, seqno)

@@ -624,15 +625,16 @@ TRACE_EVENT(i915_gem_request_queue,

	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->hw_id = req->ctx->hw_id;
			   __entry->ring = req->engine->id;
			   __entry->ctx = req->fence.context;
			   __entry->seqno = req->fence.seqno;
			   __entry->flags = flags;
			   ),

	    TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
		      __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
		      __entry->flags)
	    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
		      __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
		      __entry->seqno, __entry->flags)
);

DECLARE_EVENT_CLASS(i915_gem_request,

@@ -641,23 +643,25 @@ DECLARE_EVENT_CLASS(i915_gem_request,

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, ctx)
			     __field(u32, hw_id)
			     __field(u32, ring)
			     __field(u32, ctx)
			     __field(u32, seqno)
			     __field(u32, global)
			     ),

	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->hw_id = req->ctx->hw_id;
			   __entry->ring = req->engine->id;
			   __entry->ctx = req->fence.context;
			   __entry->seqno = req->fence.seqno;
			   __entry->global = req->global_seqno;
			   ),

	    TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
		      __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
		      __entry->global)
	    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
		      __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
		      __entry->seqno, __entry->global)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_add,

@@ -683,15 +687,17 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, hw_id)
			     __field(u32, ring)
			     __field(u32, ctx)
			     __field(u32, seqno)
			     __field(u32, global_seqno)
			     __field(u32, ctx)
			     __field(u32, port)
			     ),

	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->hw_id = req->ctx->hw_id;
			   __entry->ring = req->engine->id;
			   __entry->ctx = req->fence.context;
			   __entry->seqno = req->fence.seqno;

@@ -699,10 +705,10 @@ DECLARE_EVENT_CLASS(i915_gem_request_hw,
			   __entry->port = port;
			   ),

	    TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
		      __entry->dev, __entry->ring, __entry->ctx,
		      __entry->seqno, __entry->global_seqno,
		      __entry->port)
	    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
		      __entry->dev, __entry->hw_id, __entry->ring,
		      __entry->ctx, __entry->seqno,
		      __entry->global_seqno, __entry->port)
);

DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,

@@ -772,6 +778,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,

	    TP_STRUCT__entry(
			     __field(u32, dev)
			     __field(u32, hw_id)
			     __field(u32, ring)
			     __field(u32, ctx)
			     __field(u32, seqno)

@@ -787,6 +794,7 @@ TRACE_EVENT(i915_gem_request_wait_begin,
	     */
	    TP_fast_assign(
			   __entry->dev = req->i915->drm.primary->index;
			   __entry->hw_id = req->ctx->hw_id;
			   __entry->ring = req->engine->id;
			   __entry->ctx = req->fence.context;
			   __entry->seqno = req->fence.seqno;

@@ -794,10 +802,10 @@ TRACE_EVENT(i915_gem_request_wait_begin,
			   __entry->flags = flags;
			   ),

	    TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
		      __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
		      __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
		      __entry->flags)
	    TP_printk("dev=%u, hw_id=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
		      __entry->dev, __entry->hw_id, __entry->ring, __entry->ctx,
		      __entry->seqno, __entry->global,
		      !!(__entry->flags & I915_WAIT_LOCKED), __entry->flags)
);

DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
@@ -140,4 +140,19 @@ static inline void drain_delayed_work(struct delayed_work *dw)
	} while (delayed_work_pending(dw));
}

static inline const char *yesno(bool v)
{
	return v ? "yes" : "no";
}

static inline const char *onoff(bool v)
{
	return v ? "on" : "off";
}

static inline const char *enableddisabled(bool v)
{
	return v ? "enabled" : "disabled";
}

#endif /* !__I915_UTILS_H */
@@ -2095,6 +2095,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
	if (WARN_ON(!pll))
		return;

	mutex_lock(&dev_priv->dpll_lock);

	if (IS_CANNONLAKE(dev_priv)) {
		/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
		val = I915_READ(DPCLKA_CFGCR0);

@@ -2115,7 +2117,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
		val = I915_READ(DPLL_CTRL2);

		val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
			DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
			 DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
		val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->id, port) |
			DPLL_CTRL2_DDI_SEL_OVERRIDE(port));

@@ -2124,6 +2126,8 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
	} else if (INTEL_INFO(dev_priv)->gen < 9) {
		I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
	}

	mutex_unlock(&dev_priv->dpll_lock);
}

static void intel_ddi_clk_disable(struct intel_encoder *encoder)
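The dpll_lock hunk above addresses the "theoretical race" item from the changelog: DPCLKA_CFGCR0/DPLL_CTRL2 hold per-port fields in one shared register, so two encoders enabling their clocks concurrently could each perform an unlocked read-modify-write and lose the other's update. A pthread sketch of the locked RMW (the bit layout here is illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t cfgcr0;		/* stands in for the shared register */
static pthread_mutex_t dpll_lock = PTHREAD_MUTEX_INITIALIZER;

/* Map @pll onto @port's 2-bit field with the whole RMW under the lock,
 * so a concurrent modeset on another port cannot clobber the write. */
static void ddi_clk_select(unsigned int port, unsigned int pll)
{
	uint32_t val;

	pthread_mutex_lock(&dpll_lock);
	val = cfgcr0;
	val &= ~(0x3u << (port * 2));       /* clear this port's field */
	val |= (pll & 0x3u) << (port * 2);  /* select the new DPLL */
	cfgcr0 = val;
	pthread_mutex_unlock(&dpll_lock);
}

int main(void)
{
	ddi_clk_select(0, 1);
	ddi_clk_select(2, 3);
	printf("cfgcr0 = 0x%08x\n", cfgcr0);
	return 0;
}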
@@ -22,6 +22,9 @@
 *
 */

#include <drm/drm_print.h>

#include "intel_device_info.h"
#include "i915_drv.h"

#define PLATFORM_NAME(x) [INTEL_##x] = #x

@@ -67,21 +70,55 @@ const char *intel_platform_name(enum intel_platform platform)
	return platform_names[platform];
}

void intel_device_info_dump(struct drm_i915_private *dev_priv)
void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p)
{
	const struct intel_device_info *info = &dev_priv->info;

	DRM_DEBUG_DRIVER("i915 device info: platform=%s gen=%i pciid=0x%04x rev=0x%02x",
			 intel_platform_name(info->platform),
			 info->gen,
			 dev_priv->drm.pdev->device,
			 dev_priv->drm.pdev->revision);
#define PRINT_FLAG(name) \
	DRM_DEBUG_DRIVER("i915 device info: " #name ": %s", yesno(info->name))
#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name));
	DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
}

static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p)
{
	drm_printf(p, "slice mask: %04x\n", sseu->slice_mask);
	drm_printf(p, "slice total: %u\n", hweight8(sseu->slice_mask));
	drm_printf(p, "subslice total: %u\n", sseu_subslice_total(sseu));
	drm_printf(p, "subslice mask %04x\n", sseu->subslice_mask);
	drm_printf(p, "subslice per slice: %u\n",
		   hweight8(sseu->subslice_mask));
	drm_printf(p, "EU total: %u\n", sseu->eu_total);
	drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice);
	drm_printf(p, "has slice power gating: %s\n",
		   yesno(sseu->has_slice_pg));
	drm_printf(p, "has subslice power gating: %s\n",
		   yesno(sseu->has_subslice_pg));
	drm_printf(p, "has EU power gating: %s\n", yesno(sseu->has_eu_pg));
}

void intel_device_info_dump_runtime(const struct intel_device_info *info,
				    struct drm_printer *p)
{
	sseu_dump(&info->sseu, p);

	drm_printf(p, "CS timestamp frequency: %u kHz\n",
		   info->cs_timestamp_frequency_khz);
}

void intel_device_info_dump(const struct intel_device_info *info,
			    struct drm_printer *p)
{
	struct drm_i915_private *dev_priv =
		container_of(info, struct drm_i915_private, info);

	drm_printf(p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
		   INTEL_DEVID(dev_priv),
		   INTEL_REVID(dev_priv),
		   intel_platform_name(info->platform),
		   info->gen);

	intel_device_info_dump_flags(info, p);
}

static void gen10_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct sseu_dev_info *sseu = &mkwrite_device_info(dev_priv)->sseu;

@@ -420,7 +457,10 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
	return 0;
}

/*
/**
 * intel_device_info_runtime_init - initialize runtime info
 * @info: intel device info struct
 *
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:

@@ -433,9 +473,10 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
void intel_device_info_runtime_init(struct intel_device_info *info)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	struct drm_i915_private *dev_priv =
		container_of(info, struct drm_i915_private, info);
	enum pipe pipe;

	if (INTEL_GEN(dev_priv) >= 10) {

@@ -543,22 +584,4 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)

	/* Initialize command stream timestamp frequency */
	info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);

	DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
	DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
	DRM_DEBUG_DRIVER("subslice total: %u\n",
			 sseu_subslice_total(&info->sseu));
	DRM_DEBUG_DRIVER("subslice mask %04x\n", info->sseu.subslice_mask);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n",
			 hweight8(info->sseu.subslice_mask));
	DRM_DEBUG_DRIVER("EU total: %u\n", info->sseu.eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->sseu.eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->sseu.has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->sseu.has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->sseu.has_eu_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("CS timestamp frequency: %u kHz\n",
			 info->cs_timestamp_frequency_khz);
}
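Several hunks above change the intel_device_info functions to take the info pointer and recover dev_priv via container_of(), which works only because the info struct is embedded in drm_i915_private rather than pointed to. A standalone sketch of that recovery:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device_info { int gen; };

struct private {
	int id;
	struct device_info info;	/* embedded, not a pointer */
};

static void runtime_init(struct device_info *info)
{
	/* recover the enclosing private from the embedded member */
	struct private *priv = container_of(info, struct private, info);

	printf("init gen%d on device %d\n", info->gen, priv->id);
}

int main(void)
{
	struct private p = { .id = 7, .info = { .gen = 9 } };

	runtime_init(&p.info);
	return 0;
}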
@@ -0,0 +1,183 @@
/*
 * Copyright © 2014-2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef _INTEL_DEVICE_INFO_H_
#define _INTEL_DEVICE_INFO_H_

#include "intel_display.h"

struct drm_printer;
struct drm_i915_private;

/* Keep in gen based order, and chronological order within a gen */
enum intel_platform {
	INTEL_PLATFORM_UNINITIALIZED = 0,
	/* gen2 */
	INTEL_I830,
	INTEL_I845G,
	INTEL_I85X,
	INTEL_I865G,
	/* gen3 */
	INTEL_I915G,
	INTEL_I915GM,
	INTEL_I945G,
	INTEL_I945GM,
	INTEL_G33,
	INTEL_PINEVIEW,
	/* gen4 */
	INTEL_I965G,
	INTEL_I965GM,
	INTEL_G45,
	INTEL_GM45,
	/* gen5 */
	INTEL_IRONLAKE,
	/* gen6 */
	INTEL_SANDYBRIDGE,
	/* gen7 */
	INTEL_IVYBRIDGE,
	INTEL_VALLEYVIEW,
	INTEL_HASWELL,
	/* gen8 */
	INTEL_BROADWELL,
	INTEL_CHERRYVIEW,
	/* gen9 */
	INTEL_SKYLAKE,
	INTEL_BROXTON,
	INTEL_KABYLAKE,
	INTEL_GEMINILAKE,
	INTEL_COFFEELAKE,
	/* gen10 */
	INTEL_CANNONLAKE,
	INTEL_MAX_PLATFORMS
};

#define DEV_INFO_FOR_EACH_FLAG(func) \
	func(is_mobile); \
	func(is_lp); \
	func(is_alpha_support); \
	/* Keep has_* in alphabetical order */ \
	func(has_64bit_reloc); \
	func(has_aliasing_ppgtt); \
	func(has_csr); \
	func(has_ddi); \
	func(has_dp_mst); \
	func(has_reset_engine); \
	func(has_fbc); \
	func(has_fpga_dbg); \
	func(has_full_ppgtt); \
	func(has_full_48bit_ppgtt); \
	func(has_gmch_display); \
	func(has_guc); \
	func(has_guc_ct); \
	func(has_hotplug); \
	func(has_l3_dpf); \
	func(has_llc); \
	func(has_logical_ring_contexts); \
	func(has_logical_ring_preemption); \
	func(has_overlay); \
	func(has_pooled_eu); \
	func(has_psr); \
	func(has_rc6); \
	func(has_rc6p); \
	func(has_resource_streamer); \
	func(has_runtime_pm); \
	func(has_snoop); \
	func(unfenced_needs_alignment); \
	func(cursor_needs_physical); \
	func(hws_needs_physical); \
	func(overlay_needs_physical); \
	func(supports_tv); \
	func(has_ipc);

struct sseu_dev_info {
	u8 slice_mask;
	u8 subslice_mask;
	u8 eu_total;
	u8 eu_per_subslice;
	u8 min_eu_in_pool;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

struct intel_device_info {
	u16 device_id;
	u16 gen_mask;

	u8 gen;
	u8 gt; /* GT number, 0 if undefined */
	u8 num_rings;
	u8 ring_mask; /* Rings supported by the HW */

	enum intel_platform platform;
	u32 platform_mask;

	u32 display_mmio_offset;

	u8 num_pipes;
	u8 num_sprites[I915_MAX_PIPES];
	u8 num_scalers[I915_MAX_PIPES];

	unsigned int page_sizes; /* page sizes supported by the HW */

#define DEFINE_FLAG(name) u8 name:1
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
#undef DEFINE_FLAG
	u16 ddb_size; /* in blocks */

	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	struct sseu_dev_info sseu;

	u32 cs_timestamp_frequency_khz;

	struct color_luts {
		u16 degamma_lut_size;
		u16 gamma_lut_size;
	} color;
};

static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu)
{
	return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask);
}

const char *intel_platform_name(enum intel_platform platform);

void intel_device_info_runtime_init(struct intel_device_info *info);
void intel_device_info_dump(const struct intel_device_info *info,
			    struct drm_printer *p);
void intel_device_info_dump_flags(const struct intel_device_info *info,
				  struct drm_printer *p);
void intel_device_info_dump_runtime(const struct intel_device_info *info,
				    struct drm_printer *p);

#endif
@@ -3073,6 +3073,12 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	if (rotation & DRM_MODE_REFLECT_X &&
	    fb->modifier == DRM_FORMAT_MOD_LINEAR) {
		DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
		return -EINVAL;
	}

	if (!plane_state->base.visible)
		return 0;

@@ -3453,9 +3459,9 @@ static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
	return 0;
}

static u32 skl_plane_ctl_rotation(unsigned int rotation)
static u32 skl_plane_ctl_rotate(unsigned int rotate)
{
	switch (rotation) {
	switch (rotate) {
	case DRM_MODE_ROTATE_0:
		break;
	/*

@@ -3469,7 +3475,22 @@ static u32 skl_plane_ctl_rotation(unsigned int rotation)
	case DRM_MODE_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
		MISSING_CASE(rotate);
	}

	return 0;
}

static u32 cnl_plane_ctl_flip(unsigned int reflect)
{
	switch (reflect) {
	case 0:
		break;
	case DRM_MODE_REFLECT_X:
		return PLANE_CTL_FLIP_HORIZONTAL;
	case DRM_MODE_REFLECT_Y:
	default:
		MISSING_CASE(reflect);
	}

	return 0;

@@ -3497,7 +3518,11 @@ u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,

	plane_ctl |= skl_plane_ctl_format(fb->format->format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
	plane_ctl |= skl_plane_ctl_rotation(rotation);
	plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);

	if (INTEL_GEN(dev_priv) >= 10)
		plane_ctl |= cnl_plane_ctl_flip(rotation &
						DRM_MODE_REFLECT_MASK);

	if (key->flags & I915_SET_COLORKEY_DESTINATION)
		plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
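The plane-control hunks above split the combined DRM rotation property into a rotation part and a reflection part, with the reflection bits only consulted on gen10+. A sketch of the decomposition, assuming the usual layout of rotation bits in the low nibble and reflection bits above them (mask values here are illustrative, not the DRM_MODE_* encodings):

#include <stdio.h>

#define ROTATE_0	(1u << 0)
#define ROTATE_90	(1u << 1)
#define ROTATE_180	(1u << 2)
#define ROTATE_270	(1u << 3)
#define ROTATE_MASK	0x0fu
#define REFLECT_X	(1u << 4)
#define REFLECT_MASK	0x30u

static unsigned int plane_ctl_rotate(unsigned int rotate)
{
	/* map the pure-rotation bits to hypothetical hw field values */
	switch (rotate) {
	case ROTATE_90:  return 1;
	case ROTATE_180: return 2;
	case ROTATE_270: return 3;
	default:         return 0;
	}
}

static unsigned int plane_ctl_flip(unsigned int reflect)
{
	/* cf. PLANE_CTL_FLIP_HORIZONTAL at bit 8 in the register hunk */
	return reflect == REFLECT_X ? (1u << 8) : 0;
}

int main(void)
{
	unsigned int rotation = ROTATE_180 | REFLECT_X;
	unsigned int ctl = 0;

	ctl |= plane_ctl_rotate(rotation & ROTATE_MASK);
	ctl |= plane_ctl_flip(rotation & REFLECT_MASK);

	printf("plane_ctl = 0x%x\n", ctl);
	return 0;
}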
@ -9694,111 +9719,27 @@ intel_framebuffer_create(struct drm_i915_gem_object *obj,
|
|||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
static u32
|
||||
intel_framebuffer_pitch_for_width(int width, int bpp)
|
||||
{
|
||||
u32 pitch = DIV_ROUND_UP(width * bpp, 8);
|
||||
return ALIGN(pitch, 64);
|
||||
}
|
||||
|
||||
static u32
|
||||
intel_framebuffer_size_for_mode(const struct drm_display_mode *mode, int bpp)
|
||||
{
|
||||
u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
|
||||
return PAGE_ALIGN(pitch * mode->vdisplay);
|
||||
}
|
||||
|
||||
static struct drm_framebuffer *
|
||||
intel_framebuffer_create_for_mode(struct drm_device *dev,
|
||||
const struct drm_display_mode *mode,
|
||||
int depth, int bpp)
|
||||
{
|
||||
struct drm_framebuffer *fb;
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
|
||||
|
||||
obj = i915_gem_object_create(to_i915(dev),
|
||||
intel_framebuffer_size_for_mode(mode, bpp));
|
||||
if (IS_ERR(obj))
|
||||
return ERR_CAST(obj);
|
||||
|
||||
mode_cmd.width = mode->hdisplay;
|
||||
mode_cmd.height = mode->vdisplay;
|
||||
mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
|
||||
bpp);
|
||||
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
|
||||
|
||||
fb = intel_framebuffer_create(obj, &mode_cmd);
|
||||
if (IS_ERR(fb))
|
||||
i915_gem_object_put(obj);
|
||||
|
||||
return fb;
|
||||
}
|
||||
|
||||
static struct drm_framebuffer *
|
||||
mode_fits_in_fbdev(struct drm_device *dev,
|
||||
const struct drm_display_mode *mode)
|
||||
{
|
||||
#ifdef CONFIG_DRM_FBDEV_EMULATION
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
struct drm_i915_gem_object *obj;
|
||||
-	struct drm_framebuffer *fb;
-
-	if (!dev_priv->fbdev)
-		return NULL;
-
-	if (!dev_priv->fbdev->fb)
-		return NULL;
-
-	obj = dev_priv->fbdev->fb->obj;
-	BUG_ON(!obj);
-
-	fb = &dev_priv->fbdev->fb->base;
-	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
-							       fb->format->cpp[0] * 8))
-		return NULL;
-
-	if (obj->base.size < mode->vdisplay * fb->pitches[0])
-		return NULL;
-
-	drm_framebuffer_get(fb);
-	return fb;
-#else
-	return NULL;
-#endif
-}
-
-static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
-					   struct drm_crtc *crtc,
-					   const struct drm_display_mode *mode,
-					   struct drm_framebuffer *fb,
-					   int x, int y)
+static int intel_modeset_disable_planes(struct drm_atomic_state *state,
+					struct drm_crtc *crtc)
 {
+	struct drm_plane *plane;
 	struct drm_plane_state *plane_state;
-	int hdisplay, vdisplay;
-	int ret;
+	int ret, i;
 
-	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
-	if (IS_ERR(plane_state))
-		return PTR_ERR(plane_state);
-
-	if (mode)
-		drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay);
-	else
-		hdisplay = vdisplay = 0;
-
-	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
+	ret = drm_atomic_add_affected_planes(state, crtc);
 	if (ret)
 		return ret;
-	drm_atomic_set_fb_for_plane(plane_state, fb);
-	plane_state->crtc_x = 0;
-	plane_state->crtc_y = 0;
-	plane_state->crtc_w = hdisplay;
-	plane_state->crtc_h = vdisplay;
-	plane_state->src_x = x << 16;
-	plane_state->src_y = y << 16;
-	plane_state->src_w = hdisplay << 16;
-	plane_state->src_h = vdisplay << 16;
+
+	for_each_new_plane_in_state(state, plane, plane_state, i) {
+		if (plane_state->crtc != crtc)
+			continue;
+
+		ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
+		if (ret)
+			return ret;
+
+		drm_atomic_set_fb_for_plane(plane_state, NULL);
+	}
 
 	return 0;
 }
@@ -9816,7 +9757,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
 	struct drm_crtc *crtc = NULL;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_framebuffer *fb;
 	struct drm_mode_config *config = &dev->mode_config;
 	struct drm_atomic_state *state = NULL, *restore_state = NULL;
 	struct drm_connector_state *connector_state;
@@ -9884,10 +9824,6 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
 found:
 	intel_crtc = to_intel_crtc(crtc);
 
-	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
-	if (ret)
-		goto fail;
-
 	state = drm_atomic_state_alloc(dev);
 	restore_state = drm_atomic_state_alloc(dev);
 	if (!state || !restore_state) {
@@ -9919,39 +9855,17 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
 	if (!mode)
 		mode = &load_detect_mode;
 
-	/* We need a framebuffer large enough to accommodate all accesses
-	 * that the plane may generate whilst we perform load detection.
-	 * We can not rely on the fbcon either being present (we get called
-	 * during its initialisation to detect all boot displays, or it may
-	 * not even exist) or that it is large enough to satisfy the
-	 * requested mode.
-	 */
-	fb = mode_fits_in_fbdev(dev, mode);
-	if (fb == NULL) {
-		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
-		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
-	} else
-		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
-	if (IS_ERR(fb)) {
-		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
-		ret = PTR_ERR(fb);
-		goto fail;
-	}
-
-	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
-	drm_framebuffer_put(fb);
+	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
 	if (ret)
 		goto fail;
 
-	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
+	ret = intel_modeset_disable_planes(state, crtc);
 	if (ret)
 		goto fail;
 
 	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
 	if (!ret)
 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
 	if (!ret)
 		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
 	if (ret) {
 		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
 		goto fail;
@@ -12569,11 +12483,15 @@ static int intel_atomic_commit(struct drm_device *dev,
 	INIT_WORK(&state->commit_work, intel_atomic_commit_work);
 
 	i915_sw_fence_commit(&intel_state->commit_ready);
-	if (nonblock)
+	if (nonblock && intel_state->modeset) {
+		queue_work(dev_priv->modeset_wq, &state->commit_work);
+	} else if (nonblock) {
 		queue_work(system_unbound_wq, &state->commit_work);
-	else
+	} else {
+		if (intel_state->modeset)
+			flush_workqueue(dev_priv->modeset_wq);
 		intel_atomic_commit_tail(state);
+	}
 
 	return 0;
 }
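
This hunk is what makes non-blocking modesets properly serialized: an ordered workqueue executes at most one work item at a time, in queueing order. A minimal standalone sketch of the same pattern, independent of the driver (every name below is illustrative, not part of i915):

	#include <linux/workqueue.h>

	/* Illustrative names only; not the driver's symbols. */
	static struct workqueue_struct *demo_modeset_wq;

	static void demo_commit(struct work_struct *work)
	{
		/* the commit tail would run here */
	}

	static int demo_init(void)
	{
		/*
		 * An ordered workqueue runs one item at a time, in queueing
		 * order, so commits queued to it cannot overtake each other;
		 * plain flips keep using system_unbound_wq and only
		 * flush_workqueue() when they must not pass a pending modeset.
		 */
		demo_modeset_wq = alloc_ordered_workqueue("demo_modeset", 0);
		return demo_modeset_wq ? 0 : -ENOMEM;
	}
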
@@ -13238,7 +13156,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
 	primary->check_plane = intel_check_primary_plane;
 
-	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 10) {
 		intel_primary_formats = skl_primary_formats;
 		num_formats = ARRAY_SIZE(skl_primary_formats);
 		modifiers = skl_format_modifiers_ccs;
@@ -13300,7 +13218,12 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
 	if (ret)
 		goto fail;
 
-	if (INTEL_GEN(dev_priv) >= 9) {
+	if (INTEL_GEN(dev_priv) >= 10) {
+		supported_rotations =
+			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
+			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270 |
+			DRM_MODE_REFLECT_X;
+	} else if (INTEL_GEN(dev_priv) >= 9) {
 		supported_rotations =
 			DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
 			DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
@@ -14531,6 +14454,8 @@ int intel_modeset_init(struct drm_device *dev)
 	enum pipe pipe;
 	struct intel_crtc *crtc;
 
+	dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
+
 	drm_mode_config_init(dev);
 
 	dev->mode_config.min_width = 0;
@@ -15335,6 +15260,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	intel_cleanup_gt_powersave(dev_priv);
 
 	intel_teardown_gmbus(dev_priv);
+
+	destroy_workqueue(dev_priv->modeset_wq);
 }
 
 void intel_connector_attach_encoder(struct intel_connector *connector,

@@ -0,0 +1,321 @@
+/*
+ * Copyright © 2006-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_DISPLAY_H_
+#define _INTEL_DISPLAY_H_
+
+enum pipe {
+	INVALID_PIPE = -1,
+
+	PIPE_A = 0,
+	PIPE_B,
+	PIPE_C,
+	_PIPE_EDP,
+
+	I915_MAX_PIPES = _PIPE_EDP
+};
+
+#define pipe_name(p) ((p) + 'A')
+
+enum transcoder {
+	TRANSCODER_A = 0,
+	TRANSCODER_B,
+	TRANSCODER_C,
+	TRANSCODER_EDP,
+	TRANSCODER_DSI_A,
+	TRANSCODER_DSI_C,
+
+	I915_MAX_TRANSCODERS
+};
+
+static inline const char *transcoder_name(enum transcoder transcoder)
+{
+	switch (transcoder) {
+	case TRANSCODER_A:
+		return "A";
+	case TRANSCODER_B:
+		return "B";
+	case TRANSCODER_C:
+		return "C";
+	case TRANSCODER_EDP:
+		return "EDP";
+	case TRANSCODER_DSI_A:
+		return "DSI A";
+	case TRANSCODER_DSI_C:
+		return "DSI C";
+	default:
+		return "<invalid>";
+	}
+}
+
+static inline bool transcoder_is_dsi(enum transcoder transcoder)
+{
+	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
+}
+
+/*
+ * Global legacy plane identifier. Valid only for primary/sprite
+ * planes on pre-g4x, and only for primary planes on g4x-bdw.
+ */
+enum i9xx_plane_id {
+	PLANE_A,
+	PLANE_B,
+	PLANE_C,
+};
+
+#define plane_name(p) ((p) + 'A')
+#define sprite_name(p, s) ((p) * INTEL_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
+
+/*
+ * Per-pipe plane identifier.
+ * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
+ * number of planes per CRTC. Not all platforms really have this many planes,
+ * which means some arrays of size I915_MAX_PLANES may have unused entries
+ * between the topmost sprite plane and the cursor plane.
+ *
+ * This is expected to be passed to various register macros
+ * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
+ */
+enum plane_id {
+	PLANE_PRIMARY,
+	PLANE_SPRITE0,
+	PLANE_SPRITE1,
+	PLANE_SPRITE2,
+	PLANE_CURSOR,
+
+	I915_MAX_PLANES,
+};
+
+#define for_each_plane_id_on_crtc(__crtc, __p) \
+	for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
+		for_each_if((__crtc)->plane_ids_mask & BIT(__p))
+
+enum port {
+	PORT_NONE = -1,
+
+	PORT_A = 0,
+	PORT_B,
+	PORT_C,
+	PORT_D,
+	PORT_E,
+
+	I915_MAX_PORTS
+};
+
+#define port_name(p) ((p) + 'A')
+
+enum dpio_channel {
+	DPIO_CH0,
+	DPIO_CH1
+};
+
+enum dpio_phy {
+	DPIO_PHY0,
+	DPIO_PHY1,
+	DPIO_PHY2,
+};
+
+#define I915_NUM_PHYS_VLV 2
+
+enum intel_display_power_domain {
+	POWER_DOMAIN_PIPE_A,
+	POWER_DOMAIN_PIPE_B,
+	POWER_DOMAIN_PIPE_C,
+	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
+	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
+	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
+	POWER_DOMAIN_TRANSCODER_A,
+	POWER_DOMAIN_TRANSCODER_B,
+	POWER_DOMAIN_TRANSCODER_C,
+	POWER_DOMAIN_TRANSCODER_EDP,
+	POWER_DOMAIN_TRANSCODER_DSI_A,
+	POWER_DOMAIN_TRANSCODER_DSI_C,
+	POWER_DOMAIN_PORT_DDI_A_LANES,
+	POWER_DOMAIN_PORT_DDI_B_LANES,
+	POWER_DOMAIN_PORT_DDI_C_LANES,
+	POWER_DOMAIN_PORT_DDI_D_LANES,
+	POWER_DOMAIN_PORT_DDI_E_LANES,
+	POWER_DOMAIN_PORT_DDI_A_IO,
+	POWER_DOMAIN_PORT_DDI_B_IO,
+	POWER_DOMAIN_PORT_DDI_C_IO,
+	POWER_DOMAIN_PORT_DDI_D_IO,
+	POWER_DOMAIN_PORT_DDI_E_IO,
+	POWER_DOMAIN_PORT_DSI,
+	POWER_DOMAIN_PORT_CRT,
+	POWER_DOMAIN_PORT_OTHER,
+	POWER_DOMAIN_VGA,
+	POWER_DOMAIN_AUDIO,
+	POWER_DOMAIN_PLLS,
+	POWER_DOMAIN_AUX_A,
+	POWER_DOMAIN_AUX_B,
+	POWER_DOMAIN_AUX_C,
+	POWER_DOMAIN_AUX_D,
+	POWER_DOMAIN_GMBUS,
+	POWER_DOMAIN_MODESET,
+	POWER_DOMAIN_GT_IRQ,
+	POWER_DOMAIN_INIT,
+
+	POWER_DOMAIN_NUM,
+};
+
+#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
+#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
+	((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
+#define POWER_DOMAIN_TRANSCODER(tran) \
+	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
+	 (tran) + POWER_DOMAIN_TRANSCODER_A)
+
+/* Used by dp and fdi links */
+struct intel_link_m_n {
+	u32 tu;
+	u32 gmch_m;
+	u32 gmch_n;
+	u32 link_m;
+	u32 link_n;
+};
+
+#define for_each_pipe(__dev_priv, __p) \
+	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
+
+#define for_each_pipe_masked(__dev_priv, __p, __mask) \
+	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \
+		for_each_if((__mask) & BIT(__p))
+
+#define for_each_universal_plane(__dev_priv, __pipe, __p) \
+	for ((__p) = 0; \
+	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \
+	     (__p)++)
+
+#define for_each_sprite(__dev_priv, __p, __s) \
+	for ((__s) = 0; \
+	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \
+	     (__s)++)
+
+#define for_each_port_masked(__port, __ports_mask) \
+	for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \
+		for_each_if((__ports_mask) & BIT(__port))
+
+#define for_each_crtc(dev, crtc) \
+	list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
+
+#define for_each_intel_plane(dev, intel_plane) \
+	list_for_each_entry(intel_plane, \
+			    &(dev)->mode_config.plane_list, \
+			    base.head)
+
+#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
+	list_for_each_entry(intel_plane, \
+			    &(dev)->mode_config.plane_list, \
+			    base.head) \
+		for_each_if((plane_mask) & \
+			    BIT(drm_plane_index(&intel_plane->base)))
+
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
+	list_for_each_entry(intel_plane, \
+			    &(dev)->mode_config.plane_list, \
+			    base.head) \
+		for_each_if((intel_plane)->pipe == (intel_crtc)->pipe)
+
+#define for_each_intel_crtc(dev, intel_crtc) \
+	list_for_each_entry(intel_crtc, \
+			    &(dev)->mode_config.crtc_list, \
+			    base.head)
+
+#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
+	list_for_each_entry(intel_crtc, \
+			    &(dev)->mode_config.crtc_list, \
+			    base.head) \
+		for_each_if((crtc_mask) & BIT(drm_crtc_index(&intel_crtc->base)))
+
+#define for_each_intel_encoder(dev, intel_encoder) \
+	list_for_each_entry(intel_encoder, \
+			    &(dev)->mode_config.encoder_list, \
+			    base.head)
+
+#define for_each_intel_connector_iter(intel_connector, iter) \
+	while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
+
+#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
+	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
+		for_each_if((intel_encoder)->base.crtc == (__crtc))
+
+#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
+	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
+		for_each_if((intel_connector)->base.encoder == (__encoder))
+
+#define for_each_power_domain(domain, mask) \
+	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
+		for_each_if(BIT_ULL(domain) & (mask))
+
+#define for_each_power_well(__dev_priv, __power_well) \
+	for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+	     (__power_well) - (__dev_priv)->power_domains.power_wells < \
+		(__dev_priv)->power_domains.power_well_count; \
+	     (__power_well)++)
+
+#define for_each_power_well_rev(__dev_priv, __power_well) \
+	for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+			      (__dev_priv)->power_domains.power_well_count - 1; \
+	     (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+	     (__power_well)--)
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
+	for_each_power_well(__dev_priv, __power_well) \
+		for_each_if((__power_well)->domains & (__domain_mask))
+
+#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
+	for_each_power_well_rev(__dev_priv, __power_well) \
+		for_each_if((__power_well)->domains & (__domain_mask))
+
+#define for_each_new_intel_plane_in_state(__state, plane, new_plane_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+	     ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+	      (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+	     (__i)++) \
+		for_each_if(plane)
+
+#define for_each_new_intel_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->base.dev->mode_config.num_crtc && \
+	     ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \
+	      (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \
+	     (__i)++) \
+		for_each_if(crtc)
+
+#define for_each_oldnew_intel_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+	for ((__i) = 0; \
+	     (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+	     ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+	      (old_plane_state) = to_intel_plane_state((__state)->base.planes[__i].old_state), \
+	      (new_plane_state) = to_intel_plane_state((__state)->base.planes[__i].new_state), 1); \
+	     (__i)++) \
+		for_each_if(plane)
+
+void intel_link_compute_m_n(int bpp, int nlanes,
+			    int pixel_clock, int link_clock,
+			    struct intel_link_m_n *m_n,
+			    bool reduce_m_n);
+
+#endif
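
The power-domain lookup macros near the end of this new header rely purely on enum ordering. A short illustration (not part of the patch), assuming only the declarations above:

	/*
	 * The lookup works by offset arithmetic, which is why the
	 * pipe/transcoder-indexed entries of enum intel_display_power_domain
	 * must stay in declaration order.
	 */
	enum intel_display_power_domain d;

	d = POWER_DOMAIN_PIPE(PIPE_B);
	/* == POWER_DOMAIN_PIPE_B: PIPE_B (1) + POWER_DOMAIN_PIPE_A (0) */

	d = POWER_DOMAIN_TRANSCODER(TRANSCODER_B);
	/* == POWER_DOMAIN_TRANSCODER_B, again by offset from TRANSCODER_A */

	d = POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP);
	/* EDP sits out of order in enum transcoder, so it is special-cased */
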
@@ -1272,6 +1272,9 @@ static int cnl_init_workarounds(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
+	/* WaDisableEarlyEOT:cnl */
+	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
+
 	return 0;
 }
 
@@ -1664,6 +1667,35 @@ static void print_request(struct drm_printer *m,
 		   rq->timeline->common->name);
 }
 
+static void hexdump(struct drm_printer *m, const void *buf, size_t len)
+{
+	const size_t rowsize = 8 * sizeof(u32);
+	const void *prev = NULL;
+	bool skip = false;
+	size_t pos;
+
+	for (pos = 0; pos < len; pos += rowsize) {
+		char line[128];
+
+		if (prev && !memcmp(prev, buf + pos, rowsize)) {
+			if (!skip) {
+				drm_printf(m, "*\n");
+				skip = true;
+			}
+			continue;
+		}
+
+		WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos,
+						rowsize, sizeof(u32),
+						line, sizeof(line),
+						false) >= sizeof(line));
+		drm_printf(m, "%08zx %s\n", pos, line);
+
+		prev = buf + pos;
+		skip = false;
+	}
+}
+
 void intel_engine_dump(struct intel_engine_cs *engine,
 		       struct drm_printer *m,
 		       const char *header, ...)
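
The new hexdump() helper mirrors the `*` repeat-suppression of hexdump(1): any 32-byte row that matches the previous row collapses into a single `*` line, so a mostly-empty 4KiB page prints in a handful of lines. A usage sketch matching the call added to intel_engine_dump() in the next hunks (the choice of drm_info_printer target is illustrative):

	/* Dump one page of the engine's hardware status page. */
	struct drm_printer p = drm_info_printer(engine->i915->drm.dev);

	drm_printf(&p, "HWSP:\n");
	hexdump(&p, engine->status_page.page_addr, PAGE_SIZE);
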
@@ -1757,6 +1789,24 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	addr = intel_engine_get_last_batch_head(engine);
 	drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
 		   upper_32_bits(addr), lower_32_bits(addr));
+	if (INTEL_GEN(dev_priv) >= 8)
+		addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
+					RING_DMA_FADD_UDW(engine->mmio_base));
+	else if (INTEL_GEN(dev_priv) >= 4)
+		addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+	else
+		addr = I915_READ(DMA_FADD_I8XX);
+	drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
+		   upper_32_bits(addr), lower_32_bits(addr));
+	if (INTEL_GEN(dev_priv) >= 4) {
+		drm_printf(m, "\tIPEIR: 0x%08x\n",
+			   I915_READ(RING_IPEIR(engine->mmio_base)));
+		drm_printf(m, "\tIPEHR: 0x%08x\n",
+			   I915_READ(RING_IPEHR(engine->mmio_base)));
+	} else {
+		drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
+		drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
+	}
 
 	if (HAS_EXECLISTS(dev_priv)) {
 		const u32 *hws = &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
@@ -1848,8 +1898,11 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 			   &engine->irq_posted)),
 		   yesno(test_bit(ENGINE_IRQ_EXECLIST,
 				  &engine->irq_posted)));
 
+	drm_printf(m, "HWSP:\n");
+	hexdump(m, engine->status_page.page_addr, PAGE_SIZE);
+
 	drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
 	drm_printf(m, "\n");
 }
 
 static u8 user_class_map[] = {

@@ -429,18 +429,18 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
 
 	for_each_engine(engine, dev_priv, id) {
-		struct intel_engine_hangcheck cur_state, *hc = &cur_state;
-		const bool busy = intel_engine_has_waiter(engine);
+		struct intel_engine_hangcheck hc;
 
 		semaphore_clear_deadlocks(dev_priv);
 
-		hangcheck_load_sample(engine, hc);
-		hangcheck_accumulate_sample(engine, hc);
-		hangcheck_store_sample(engine, hc);
+		hangcheck_load_sample(engine, &hc);
+		hangcheck_accumulate_sample(engine, &hc);
+		hangcheck_store_sample(engine, &hc);
 
 		if (engine->hangcheck.stalled) {
 			hung |= intel_engine_flag(engine);
-			if (hc->action != ENGINE_DEAD)
+			if (hc.action != ENGINE_DEAD)
 				stuck |= intel_engine_flag(engine);
 		}
 

@@ -128,22 +128,46 @@ intel_i2c_reset(struct drm_i915_private *dev_priv)
 	I915_WRITE(GMBUS4, 0);
 }
 
-static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+				   bool enable)
 {
 	u32 val;
 
-	/* When using bit bashing for I2C, this bit needs to be set to 1 */
-	if (!IS_PINEVIEW(dev_priv))
-		return;
-
 	val = I915_READ(DSPCLK_GATE_D);
-	if (enable)
-		val |= DPCUNIT_CLOCK_GATE_DISABLE;
+	if (!enable)
+		val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
 	else
-		val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+		val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
 	I915_WRITE(DSPCLK_GATE_D, val);
 }
 
+static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+				   bool enable)
+{
+	u32 val;
+
+	val = I915_READ(SOUTH_DSPCLK_GATE_D);
+	if (!enable)
+		val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+	else
+		val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+}
+
+static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
+				   bool enable)
+{
+	u32 val;
+
+	val = I915_READ(GEN9_CLKGATE_DIS_4);
+	if (!enable)
+		val |= BXT_GMBUS_GATING_DIS;
+	else
+		val &= ~BXT_GMBUS_GATING_DIS;
+	I915_WRITE(GEN9_CLKGATE_DIS_4, val);
+}
+
 static u32 get_reserved(struct intel_gmbus *bus)
 {
 	struct drm_i915_private *dev_priv = bus->dev_priv;
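
One easy misreading of these three helpers: the enable argument refers to the clock gating itself, not to the workaround, so the !enable branch sets the *_GATE_DISABLE/_GATING_DIS bit. A sketch of the bracket the later hunks build around each transfer (illustrative, not an additional call site):

	pnv_gmbus_clock_gating(dev_priv, false);	/* gating off: unit stays clocked */
	/* ... bit-banged I2C transfer ... */
	pnv_gmbus_clock_gating(dev_priv, true);		/* gating restored afterwards */
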
@@ -221,7 +245,10 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
 	struct drm_i915_private *dev_priv = bus->dev_priv;
 
 	intel_i2c_reset(dev_priv);
-	intel_i2c_quirk_set(dev_priv, true);
+
+	if (IS_PINEVIEW(dev_priv))
+		pnv_gmbus_clock_gating(dev_priv, false);
+
 	set_data(bus, 1);
 	set_clock(bus, 1);
 	udelay(I2C_RISEFALL_TIME);
@@ -238,7 +265,9 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
 
 	set_data(bus, 1);
 	set_clock(bus, 1);
-	intel_i2c_quirk_set(dev_priv, false);
+
+	if (IS_PINEVIEW(dev_priv))
+		pnv_gmbus_clock_gating(dev_priv, true);
 }
 
 static void
@@ -481,6 +510,13 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 	int i = 0, inc, try = 0;
 	int ret = 0;
 
+	/* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+	if (IS_GEN9_LP(dev_priv))
+		bxt_gmbus_clock_gating(dev_priv, false);
+	else if (HAS_PCH_SPT(dev_priv) ||
+		 HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+		pch_gmbus_clock_gating(dev_priv, false);
+
 retry:
 	I915_WRITE_FW(GMBUS0, bus->reg0);
 
@@ -582,6 +618,13 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
 	ret = -EAGAIN;
 
 out:
+	/* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+	if (IS_GEN9_LP(dev_priv))
+		bxt_gmbus_clock_gating(dev_priv, true);
+	else if (HAS_PCH_SPT(dev_priv) ||
+		 HAS_PCH_KBP(dev_priv) || HAS_PCH_CNP(dev_priv))
+		pch_gmbus_clock_gating(dev_priv, true);
+
 	return ret;
 }
 

@@ -193,7 +193,7 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
 	};
 
 	if (!pci_dev_present(atom_hdaudio_ids)) {
-		DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n");
+		DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
 		lpe_present = true;
 	}
 }

@@ -449,7 +449,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 
 			GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x\n",
 				  engine->name, n,
-				  rq->ctx->hw_id, count,
+				  port[n].context_id, count,
 				  rq->global_seqno);
 		} else {
 			GEM_BUG_ON(!n);
@@ -504,7 +504,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 	ce->ring->tail &= (ce->ring->size - 1);
 	ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
 
-	GEM_TRACE("\n");
+	GEM_TRACE("%s\n", engine->name);
 	for (n = execlists_num_ports(&engine->execlists); --n; )
 		elsp_write(0, engine->execlists.elsp);
 
@@ -861,9 +861,10 @@ static void execlists_submission_tasklet(unsigned long data)
 		 */
 
 		status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
-		GEM_TRACE("%s csb[%dd]: status=0x%08x:0x%08x\n",
+		GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
 			  engine->name, head,
-			  status, buf[2*head + 1]);
+			  status, buf[2*head + 1],
+			  execlists->active);
 
 		if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
 			      GEN8_CTX_STATUS_PREEMPTED))
@@ -881,6 +882,8 @@ static void execlists_submission_tasklet(unsigned long data)
 
 		if (status & GEN8_CTX_STATUS_COMPLETE &&
 		    buf[2*head + 1] == PREEMPT_ID) {
+			GEM_TRACE("%s preempt-idle\n", engine->name);
+
 			execlists_cancel_port_requests(execlists);
 			execlists_unwind_incomplete_requests(execlists);
 
@@ -905,8 +908,8 @@ static void execlists_submission_tasklet(unsigned long data)
 			rq = port_unpack(port, &count);
 			GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x\n",
 				  engine->name,
-				  rq->ctx->hw_id, count,
-				  rq->global_seqno);
+				  port->context_id, count,
+				  rq ? rq->global_seqno : 0);
 			GEM_BUG_ON(count == 0);
 			if (--count == 0) {
 				GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
@@ -1555,6 +1558,8 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	struct intel_context *ce;
 	unsigned long flags;
 
+	GEM_TRACE("%s seqno=%x\n",
+		  engine->name, request ? request->global_seqno : 0);
 	spin_lock_irqsave(&engine->timeline->lock, flags);
 
 	/*

@@ -32,6 +32,8 @@
 
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+
+#include "intel_opregion.h"
 #include "i915_drv.h"
 #include "intel_drv.h"
 

@@ -0,0 +1,106 @@
+/*
+ * Copyright © 2008-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _INTEL_OPREGION_H_
+#define _INTEL_OPREGION_H_
+
+#include <linux/workqueue.h>
+#include <linux/pci.h>
+
+struct drm_i915_private;
+struct intel_encoder;
+
+struct opregion_header;
+struct opregion_acpi;
+struct opregion_swsci;
+struct opregion_asle;
+
+struct intel_opregion {
+	struct opregion_header *header;
+	struct opregion_acpi *acpi;
+	struct opregion_swsci *swsci;
+	u32 swsci_gbda_sub_functions;
+	u32 swsci_sbcb_sub_functions;
+	struct opregion_asle *asle;
+	void *rvda;
+	void *vbt_firmware;
+	const void *vbt;
+	u32 vbt_size;
+	u32 *lid_state;
+	struct work_struct asle_work;
+};
+
+#define OPREGION_SIZE (8 * 1024)
+
+#ifdef CONFIG_ACPI
+
+int intel_opregion_setup(struct drm_i915_private *dev_priv);
+void intel_opregion_register(struct drm_i915_private *dev_priv);
+void intel_opregion_unregister(struct drm_i915_private *dev_priv);
+void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
+int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+				  bool enable);
+int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
+				  pci_power_t state);
+int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
+
+#else /* CONFIG_ACPI*/
+
+static inline int intel_opregion_setup(struct drm_i915_private *dev_priv)
+{
+	return 0;
+}
+
+static inline void intel_opregion_register(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+	return 0;
+}
+
+static inline int
+intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
+{
+	return 0;
+}
+
+static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif

@@ -8447,6 +8447,11 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
 	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
 		val |= SARBUNIT_CLKGATE_DIS;
 	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
+
+	/* WaDisableVFclkgate:cnl */
+	val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE);
+	val |= VFUNIT_CLKGATE_DIS;
+	I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val);
 }
 
 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)

@@ -590,7 +590,7 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	if (dev_priv->psr.active) {
-		i915_reg_t psr_ctl;
+		i915_reg_t psr_status;
 		u32 psr_status_mask;
 
 		if (dev_priv->psr.aux_frame_sync)
@@ -599,24 +599,24 @@ static void hsw_psr_disable(struct intel_dp *intel_dp,
 					   0);
 
 		if (dev_priv->psr.psr2_support) {
-			psr_ctl = EDP_PSR2_CTL;
+			psr_status = EDP_PSR2_STATUS_CTL;
 			psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
 
-			I915_WRITE(psr_ctl,
-				   I915_READ(psr_ctl) &
+			I915_WRITE(EDP_PSR2_CTL,
+				   I915_READ(EDP_PSR2_CTL) &
 				   ~(EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE));
 
 		} else {
-			psr_ctl = EDP_PSR_STATUS_CTL;
+			psr_status = EDP_PSR_STATUS_CTL;
 			psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
 
-			I915_WRITE(psr_ctl,
-				   I915_READ(psr_ctl) & ~EDP_PSR_ENABLE);
+			I915_WRITE(EDP_PSR_CTL,
+				   I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
 		}
 
 		/* Wait till PSR is idle */
 		if (intel_wait_for_register(dev_priv,
-					    psr_ctl, psr_status_mask, 0,
+					    psr_status, psr_status_mask, 0,
 					    2000))
 			DRM_ERROR("Timed out waiting for PSR Idle State\n");
 

@@ -1726,13 +1726,13 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	BIT_ULL(POWER_DOMAIN_AUX_C) |			\
 	BIT_ULL(POWER_DOMAIN_AUDIO) |			\
 	BIT_ULL(POWER_DOMAIN_VGA) |			\
-	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
 	BIT_ULL(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (		\
 	BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS |		\
 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
+	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
 	BIT_ULL(POWER_DOMAIN_INIT))
 #define BXT_DPIO_CMN_A_POWER_DOMAINS (			\
 	BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) |	\
@@ -1792,6 +1792,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 	BIT_ULL(POWER_DOMAIN_GT_IRQ) |			\
 	BIT_ULL(POWER_DOMAIN_MODESET) |			\
 	BIT_ULL(POWER_DOMAIN_AUX_A) |			\
+	BIT_ULL(POWER_DOMAIN_GMBUS) |			\
 	BIT_ULL(POWER_DOMAIN_INIT))
 
 #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (		\

@@ -1637,7 +1637,7 @@ static int igt_shrink_thp(void *arg)
 	 * shmem to truncate our pages.
 	 */
 	i915_gem_shrink_all(i915);
-	if (!IS_ERR_OR_NULL(obj->mm.pages)) {
+	if (i915_gem_object_has_pages(obj)) {
 		pr_err("shrink-all didn't truncate the pages\n");
 		err = -EINVAL;
 		goto out_close;

@@ -132,6 +132,12 @@ static int emit_recurse_batch(struct hang *h,
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = upper_32_bits(hws_address(hws, rq));
 		*batch++ = rq->fence.seqno;
+		*batch++ = MI_ARB_CHECK;
+
+		memset(batch, 0, 1024);
+		batch += 1024 / sizeof(*batch);
+
+		*batch++ = MI_ARB_CHECK;
 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
 		*batch++ = lower_32_bits(vma->node.start);
 		*batch++ = upper_32_bits(vma->node.start);
@@ -140,6 +146,12 @@ static int emit_recurse_batch(struct hang *h,
 		*batch++ = 0;
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = rq->fence.seqno;
+		*batch++ = MI_ARB_CHECK;
+
+		memset(batch, 0, 1024);
+		batch += 1024 / sizeof(*batch);
+
+		*batch++ = MI_ARB_CHECK;
 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
 		*batch++ = lower_32_bits(vma->node.start);
 	} else if (INTEL_GEN(i915) >= 4) {
@@ -147,12 +159,24 @@ static int emit_recurse_batch(struct hang *h,
 		*batch++ = 0;
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = rq->fence.seqno;
+		*batch++ = MI_ARB_CHECK;
+
+		memset(batch, 0, 1024);
+		batch += 1024 / sizeof(*batch);
+
+		*batch++ = MI_ARB_CHECK;
 		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
 		*batch++ = lower_32_bits(vma->node.start);
 	} else {
 		*batch++ = MI_STORE_DWORD_IMM;
 		*batch++ = lower_32_bits(hws_address(hws, rq));
 		*batch++ = rq->fence.seqno;
+		*batch++ = MI_ARB_CHECK;
+
+		memset(batch, 0, 1024);
+		batch += 1024 / sizeof(*batch);
+
+		*batch++ = MI_ARB_CHECK;
 		*batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
 		*batch++ = lower_32_bits(vma->node.start);
 	}
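
Every arm of emit_recurse_batch() gains the same sequence. A sketch of the resulting batch layout (offsets illustrative, reasoning hedged from what the hunks show):

	/*
	 * store seqno -> HWS        (MI_STORE_DWORD_IMM)   mark request started
	 * MI_ARB_CHECK                                      arbitration point
	 * 1024 bytes of zeroes      (memset(batch, 0, 1024) emits MI_NOOPs)
	 * MI_ARB_CHECK                                      arbitration point
	 * MI_BATCH_BUFFER_START --> back to the start       spin forever
	 *
	 * MI_ARB_CHECK inserts arbitration points into the otherwise
	 * unpreemptible self-looping batch, and the NOP padding separates
	 * them from the backwards branch that re-arms the loop.
	 */
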
@@ -234,6 +258,16 @@ static void hang_fini(struct hang *h)
 	i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
 }
 
+static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
+{
+	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
+					       rq->fence.seqno),
+			     10) &&
+		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
+					    rq->fence.seqno),
+			  1000));
+}
+
 static int igt_hang_sanitycheck(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
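
wait_for_hang() is what the tests below use to confirm the spinner has actually begun executing before injecting a reset: it busy-waits up to 10us for the seqno the batch writes to the status page (the common case), then falls back to a sleeping wait of up to 1000ms. A usage sketch under the same assumptions as those tests (struct hang h already set up via hang_init()):

	struct drm_i915_gem_request *rq;
	int err = 0;

	rq = hang_create_request(&h, engine, i915->kernel_context);
	if (!IS_ERR(rq)) {
		i915_gem_request_get(rq);
		__i915_add_request(rq, true);

		if (!wait_for_hang(&h, rq))
			err = -EIO;	/* the spinner never started */

		i915_gem_request_put(rq);
	}
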
@@ -296,6 +330,9 @@ static void global_reset_lock(struct drm_i915_private *i915)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
+	pr_debug("%s: current gpu_error=%08lx\n",
+		 __func__, i915->gpu_error.flags);
+
 	while (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags))
 		wait_event(i915->gpu_error.reset_queue,
 			   !test_bit(I915_RESET_BACKOFF,
@@ -353,54 +390,128 @@ static int igt_global_reset(void *arg)
 	return err;
 }
 
-static int igt_reset_engine(void *arg)
+static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
 {
-	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	unsigned int reset_count, reset_engine_count;
+	struct hang h;
 	int err = 0;
 
-	/* Check that we can issue a global GPU and engine reset */
+	/* Check that we can issue an engine reset on an idle engine (no-op) */
 
 	if (!intel_has_reset_engine(i915))
 		return 0;
 
+	if (active) {
+		mutex_lock(&i915->drm.struct_mutex);
+		err = hang_init(&h, i915);
+		mutex_unlock(&i915->drm.struct_mutex);
+		if (err)
+			return err;
+	}
+
 	for_each_engine(engine, i915, id) {
-		set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+		unsigned int reset_count, reset_engine_count;
+		IGT_TIMEOUT(end_time);
+
+		if (active && !intel_engine_can_store_dword(engine))
+			continue;
+
 		reset_count = i915_reset_count(&i915->gpu_error);
 		reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
 							     engine);
 
-		err = i915_reset_engine(engine, I915_RESET_QUIET);
-		if (err) {
-			pr_err("i915_reset_engine failed\n");
-			break;
-		}
+		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+		do {
+			if (active) {
+				struct drm_i915_gem_request *rq;
 
-		if (i915_reset_count(&i915->gpu_error) != reset_count) {
-			pr_err("Full GPU reset recorded! (engine reset expected)\n");
-			err = -EINVAL;
-			break;
-		}
+				mutex_lock(&i915->drm.struct_mutex);
+				rq = hang_create_request(&h, engine,
+							 i915->kernel_context);
+				if (IS_ERR(rq)) {
+					err = PTR_ERR(rq);
+					mutex_unlock(&i915->drm.struct_mutex);
+					break;
+				}
 
-		if (i915_reset_engine_count(&i915->gpu_error, engine) ==
-		    reset_engine_count) {
-			pr_err("No %s engine reset recorded!\n", engine->name);
-			err = -EINVAL;
-			break;
-		}
+				i915_gem_request_get(rq);
+				__i915_add_request(rq, true);
+				mutex_unlock(&i915->drm.struct_mutex);
 
-		clear_bit(I915_RESET_ENGINE + engine->id,
-			  &i915->gpu_error.flags);
+				if (!wait_for_hang(&h, rq)) {
+					struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+					pr_err("%s: Failed to start request %x, at %x\n",
+					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
+					intel_engine_dump(engine, &p,
+							  "%s\n", engine->name);
+
+					i915_gem_request_put(rq);
+					err = -EIO;
+					break;
+				}
+
+				i915_gem_request_put(rq);
+			}
+
+			engine->hangcheck.stalled = true;
+			engine->hangcheck.seqno =
+				intel_engine_get_seqno(engine);
+
+			err = i915_reset_engine(engine, I915_RESET_QUIET);
+			if (err) {
+				pr_err("i915_reset_engine failed\n");
+				break;
+			}
+
+			if (i915_reset_count(&i915->gpu_error) != reset_count) {
+				pr_err("Full GPU reset recorded! (engine reset expected)\n");
+				err = -EINVAL;
+				break;
+			}
+
+			reset_engine_count += active;
+			if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+			    reset_engine_count) {
+				pr_err("%s engine reset %srecorded!\n",
+				       engine->name, active ? "not " : "");
+				err = -EINVAL;
+				break;
+			}
+
+			engine->hangcheck.stalled = false;
+		} while (time_before(jiffies, end_time));
+		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
 
 		if (err)
 			break;
 
 		cond_resched();
 	}
 
 	if (i915_terminally_wedged(&i915->gpu_error))
 		err = -EIO;
 
+	if (active) {
+		mutex_lock(&i915->drm.struct_mutex);
+		hang_fini(&h);
+		mutex_unlock(&i915->drm.struct_mutex);
+	}
+
 	return err;
 }
 
+static int igt_reset_idle_engine(void *arg)
+{
+	return __igt_reset_engine(arg, false);
+}
+
+static int igt_reset_active_engine(void *arg)
+{
+	return __igt_reset_engine(arg, true);
+}
+
 static int active_engine(void *data)
 {
 	struct intel_engine_cs *engine = data;
@@ -462,11 +573,12 @@ static int active_engine(void *data)
 	return err;
 }
 
-static int igt_reset_active_engines(void *arg)
+static int __igt_reset_engine_others(struct drm_i915_private *i915,
+				     bool active)
 {
-	struct drm_i915_private *i915 = arg;
-	struct intel_engine_cs *engine, *active;
+	struct intel_engine_cs *engine, *other;
 	enum intel_engine_id id, tmp;
+	struct hang h;
 	int err = 0;
 
 	/* Check that issuing a reset on one engine does not interfere
@@ -476,24 +588,36 @@ static int igt_reset_active_engines(void *arg)
 	if (!intel_has_reset_engine(i915))
 		return 0;
 
+	if (active) {
+		mutex_lock(&i915->drm.struct_mutex);
+		err = hang_init(&h, i915);
+		mutex_unlock(&i915->drm.struct_mutex);
+		if (err)
+			return err;
+	}
+
 	for_each_engine(engine, i915, id) {
-		struct task_struct *threads[I915_NUM_ENGINES];
+		struct task_struct *threads[I915_NUM_ENGINES] = {};
 		unsigned long resets[I915_NUM_ENGINES];
 		unsigned long global = i915_reset_count(&i915->gpu_error);
+		unsigned long count = 0;
 		IGT_TIMEOUT(end_time);
 
-		memset(threads, 0, sizeof(threads));
-		for_each_engine(active, i915, tmp) {
+		if (active && !intel_engine_can_store_dword(engine))
+			continue;
+
+		for_each_engine(other, i915, tmp) {
 			struct task_struct *tsk;
 
-			if (active == engine)
+			resets[tmp] = i915_reset_engine_count(&i915->gpu_error,
+							      other);
+
+			if (other == engine)
 				continue;
 
-			resets[tmp] = i915_reset_engine_count(&i915->gpu_error,
-							      active);
-
-			tsk = kthread_run(active_engine, active,
-					  "igt/%s", active->name);
+			tsk = kthread_run(active_engine, other,
+					  "igt/%s", other->name);
 			if (IS_ERR(tsk)) {
 				err = PTR_ERR(tsk);
 				goto unwind;
@@ -503,20 +627,70 @@ static int igt_reset_active_engines(void *arg)
 			get_task_struct(tsk);
 		}
 
-		set_bit(I915_RESET_ENGINE + engine->id, &i915->gpu_error.flags);
+		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
 		do {
+			if (active) {
+				struct drm_i915_gem_request *rq;
+
+				mutex_lock(&i915->drm.struct_mutex);
+				rq = hang_create_request(&h, engine,
+							 i915->kernel_context);
+				if (IS_ERR(rq)) {
+					err = PTR_ERR(rq);
+					mutex_unlock(&i915->drm.struct_mutex);
+					break;
+				}
+
+				i915_gem_request_get(rq);
+				__i915_add_request(rq, true);
+				mutex_unlock(&i915->drm.struct_mutex);
+
+				if (!wait_for_hang(&h, rq)) {
+					struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+					pr_err("%s: Failed to start request %x, at %x\n",
+					       __func__, rq->fence.seqno, hws_seqno(&h, rq));
+					intel_engine_dump(engine, &p,
+							  "%s\n", engine->name);
+
+					i915_gem_request_put(rq);
+					err = -EIO;
+					break;
+				}
+
+				i915_gem_request_put(rq);
+			}
+
+			engine->hangcheck.stalled = true;
+			engine->hangcheck.seqno =
+				intel_engine_get_seqno(engine);
+
 			err = i915_reset_engine(engine, I915_RESET_QUIET);
 			if (err) {
-				pr_err("i915_reset_engine(%s) failed, err=%d\n",
-				       engine->name, err);
+				pr_err("i915_reset_engine(%s:%s) failed, err=%d\n",
+				       engine->name, active ? "active" : "idle", err);
 				break;
 			}
+
+			engine->hangcheck.stalled = false;
+			count++;
 		} while (time_before(jiffies, end_time));
-		clear_bit(I915_RESET_ENGINE + engine->id,
-			  &i915->gpu_error.flags);
+		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+		pr_info("i915_reset_engine(%s:%s): %lu resets\n",
+			engine->name, active ? "active" : "idle", count);
+
+		if (i915_reset_engine_count(&i915->gpu_error, engine) -
+		    resets[engine->id] != (active ? count : 0)) {
+			pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
+			       engine->name, active ? "active" : "idle", count,
+			       i915_reset_engine_count(&i915->gpu_error,
+						       engine) - resets[engine->id]);
+			if (!err)
+				err = -EINVAL;
+		}
 
 unwind:
-		for_each_engine(active, i915, tmp) {
+		for_each_engine(other, i915, tmp) {
 			int ret;
 
 			if (!threads[tmp])
@@ -524,27 +698,29 @@ static int igt_reset_active_engines(void *arg)
 
 			ret = kthread_stop(threads[tmp]);
 			if (ret) {
-				pr_err("kthread for active engine %s failed, err=%d\n",
-				       active->name, ret);
+				pr_err("kthread for other engine %s failed, err=%d\n",
+				       other->name, ret);
 				if (!err)
 					err = ret;
 			}
 			put_task_struct(threads[tmp]);
 
 			if (resets[tmp] != i915_reset_engine_count(&i915->gpu_error,
-								   active)) {
+								   other)) {
 				pr_err("Innocent engine %s was reset (count=%ld)\n",
-				       active->name,
+				       other->name,
 				       i915_reset_engine_count(&i915->gpu_error,
-							       active) - resets[tmp]);
-				err = -EIO;
+							       other) - resets[tmp]);
+				if (!err)
+					err = -EINVAL;
 			}
 		}
 
 		if (global != i915_reset_count(&i915->gpu_error)) {
 			pr_err("Global reset (count=%ld)!\n",
 			       i915_reset_count(&i915->gpu_error) - global);
-			err = -EIO;
+			if (!err)
+				err = -EINVAL;
 		}
 
 		if (err)
@@ -556,9 +732,25 @@ static int igt_reset_active_engines(void *arg)
 	if (i915_terminally_wedged(&i915->gpu_error))
 		err = -EIO;
 
+	if (active) {
+		mutex_lock(&i915->drm.struct_mutex);
+		hang_fini(&h);
+		mutex_unlock(&i915->drm.struct_mutex);
+	}
+
 	return err;
 }
 
+static int igt_reset_idle_engine_others(void *arg)
+{
+	return __igt_reset_engine_others(arg, false);
+}
+
+static int igt_reset_active_engine_others(void *arg)
+{
+	return __igt_reset_engine_others(arg, true);
+}
+
 static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
 {
 	u32 reset_count;
@@ -574,16 +766,6 @@ static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
 	return reset_count;
 }
 
-static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
-{
-	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
-					       rq->fence.seqno),
-			     10) &&
-		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
-					    rq->fence.seqno),
-			  1000));
-}
-
 static int igt_wait_reset(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -617,8 +799,8 @@ static int igt_wait_reset(void *arg)
 	if (!wait_for_hang(&h, rq)) {
 		struct drm_printer p = drm_info_printer(i915->drm.dev);
 
-		pr_err("Failed to start request %x, at %x\n",
-		       rq->fence.seqno, hws_seqno(&h, rq));
+		pr_err("%s: Failed to start request %x, at %x\n",
+		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
 
 		i915_reset(i915, 0);
@@ -712,8 +894,8 @@ static int igt_reset_queue(void *arg)
 			if (!wait_for_hang(&h, prev)) {
 				struct drm_printer p = drm_info_printer(i915->drm.dev);
 
-				pr_err("Failed to start request %x, at %x\n",
-				       prev->fence.seqno, hws_seqno(&h, prev));
+				pr_err("%s: Failed to start request %x, at %x\n",
+				       __func__, prev->fence.seqno, hws_seqno(&h, prev));
 				intel_engine_dump(prev->engine, &p,
 						  "%s\n", prev->engine->name);
 
@@ -819,8 +1001,8 @@ static int igt_handle_error(void *arg)
 	if (!wait_for_hang(&h, rq)) {
 		struct drm_printer p = drm_info_printer(i915->drm.dev);
 
-		pr_err("Failed to start request %x, at %x\n",
-		       rq->fence.seqno, hws_seqno(&h, rq));
+		pr_err("%s: Failed to start request %x, at %x\n",
+		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
 		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
 
 		i915_reset(i915, 0);
@@ -864,21 +1046,26 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
 		SUBTEST(igt_hang_sanitycheck),
-		SUBTEST(igt_reset_engine),
-		SUBTEST(igt_reset_active_engines),
+		SUBTEST(igt_reset_idle_engine),
+		SUBTEST(igt_reset_active_engine),
+		SUBTEST(igt_reset_idle_engine_others),
+		SUBTEST(igt_reset_active_engine_others),
 		SUBTEST(igt_wait_reset),
 		SUBTEST(igt_reset_queue),
 		SUBTEST(igt_handle_error),
 	};
+	bool saved_hangcheck;
 	int err;
 
 	if (!intel_has_gpu_reset(i915))
 		return 0;
 
 	intel_runtime_pm_get(i915);
+	saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
 
 	err = i915_subtests(tests, i915);
 
+	i915_modparams.enable_hangcheck = saved_hangcheck;
 	intel_runtime_pm_put(i915);
 
 	return err;

@@ -373,24 +373,46 @@
 /* CFL S */
 #define INTEL_CFL_S_GT1_IDS(info) \
 	INTEL_VGA_DEVICE(0x3E90, info), /* SRV GT1 */ \
-	INTEL_VGA_DEVICE(0x3E93, info)  /* SRV GT1 */
+	INTEL_VGA_DEVICE(0x3E93, info), /* SRV GT1 */ \
+	INTEL_VGA_DEVICE(0x3E99, info)  /* SRV GT1 */
 
 #define INTEL_CFL_S_GT2_IDS(info) \
 	INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
 	INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
-	INTEL_VGA_DEVICE(0x3E96, info)  /* SRV GT2 */
+	INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+	INTEL_VGA_DEVICE(0x3E9A, info)  /* SRV GT2 */
 
 /* CFL H */
 #define INTEL_CFL_H_GT2_IDS(info) \
 	INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
 	INTEL_VGA_DEVICE(0x3E94, info)  /* Halo GT2 */
 
-/* CFL U */
+/* CFL U GT1 */
+#define INTEL_CFL_U_GT1_IDS(info) \
+	INTEL_VGA_DEVICE(0x3EA1, info), \
+	INTEL_VGA_DEVICE(0x3EA4, info)
+
+/* CFL U GT2 */
+#define INTEL_CFL_U_GT2_IDS(info) \
+	INTEL_VGA_DEVICE(0x3EA0, info), \
+	INTEL_VGA_DEVICE(0x3EA3, info), \
+	INTEL_VGA_DEVICE(0x3EA9, info)
+
+/* CFL U GT3 */
 #define INTEL_CFL_U_GT3_IDS(info) \
+	INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \
 	INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
 	INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
 	INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
-	INTEL_VGA_DEVICE(0x3EA8, info), /* ULT GT3 */ \
-	INTEL_VGA_DEVICE(0x3EA5, info)  /* ULT GT3 */
+	INTEL_VGA_DEVICE(0x3EA8, info)  /* ULT GT3 */
 
 #define INTEL_CFL_IDS(info) \
 	INTEL_CFL_S_GT1_IDS(info), \
 	INTEL_CFL_S_GT2_IDS(info), \
 	INTEL_CFL_H_GT2_IDS(info), \
+	INTEL_CFL_U_GT1_IDS(info), \
+	INTEL_CFL_U_GT2_IDS(info), \
 	INTEL_CFL_U_GT3_IDS(info)
 
 /* CNL U 2+2 */
 #define INTEL_CNL_U_GT2_IDS(info) \