mirror of https://gitee.com/openkylin/linux.git
Merge tag 'drm-intel-next-fixes-2016-07-25' of git://anongit.freedesktop.org/drm-intel into drm-next
Bunch of fixes for the 4.8 merge pull, nothing out of the ordinary. All
suitably marked up with cc: stable where needed.

* tag 'drm-intel-next-fixes-2016-07-25' of git://anongit.freedesktop.org/drm-intel:
  drm/i915/gen9: Add WaInPlaceDecompressionHang
  drm/i915/guc: Revert "drm/i915/guc: enable GuC loading & submission by default"
  drm/i915/bxt: Fix inadvertent CPU snooping due to incorrect MOCS config
  drm/i915/gen9: Clean up MOCS table definitions
  drm/i915: Set legacy properties when using legacy gamma set IOCTL. (v2)
  drm/i915: Enable polling when we don't have hpd
  drm/i915/vlv: Disable HPD in valleyview_crt_detect_hotplug()
  drm/i915/vlv: Reset the ADPA in vlv_display_power_well_init()
  drm/i915/vlv: Make intel_crt_reset() per-encoder
  drm/i915: Unbreak interrupts on pre-gen6
  drm/i915/breadcrumbs: Queue hangcheck before sleeping
commit c3f8d8645e
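A note on the GuC revert in the series above: with the defaults flipped back to 0 (see the i915_params.c hunks below), GuC firmware loading and submission stay off unless explicitly requested. Assuming a GuC-capable part with firmware available, opting back in would be done at boot, e.g. with i915.enable_guc_loading=1 i915.enable_guc_submission=1 on the kernel command line; the parameters are registered with mode 0400, so they cannot be flipped through sysfs at runtime.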
@@ -2413,6 +2413,9 @@ static int intel_runtime_suspend(struct device *device)
 
 	assert_forcewakes_inactive(dev_priv);
 
+	if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+		intel_hpd_poll_init(dev_priv);
+
 	DRM_DEBUG_KMS("Device suspended\n");
 	return 0;
 }
@@ -284,6 +284,9 @@ struct i915_hotplug {
 	u32 short_port_mask;
 	struct work_struct dig_port_work;
 
+	struct work_struct poll_init_work;
+	bool poll_enabled;
+
 	/*
 	 * if we get a HPD irq from DP and a HPD irq from non-DP
 	 * the non-DP HPD could block the workqueue on a mode config
@@ -2743,6 +2746,8 @@ struct drm_i915_cmd_table {
 #define SKL_REVID_D0		0x3
 #define SKL_REVID_E0		0x4
 #define SKL_REVID_F0		0x5
+#define SKL_REVID_G0		0x6
+#define SKL_REVID_H0		0x7
 
 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
 
@@ -2957,6 +2962,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv);
 void intel_hpd_init_work(struct drm_i915_private *dev_priv);
 void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
 
 /* i915_irq.c */
 static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
@@ -1501,15 +1501,6 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			break;
 		}
 
-		/* Ensure that even if the GPU hangs, we get woken up.
-		 *
-		 * However, note that if no one is waiting, we never notice
-		 * a gpu hang. Eventually, we will have to wait for a resource
-		 * held by the GPU and so trigger a hangcheck. In the most
-		 * pathological case, this will be upon memory starvation!
-		 */
-		i915_queue_hangcheck(req->i915);
-
 		timeout_remain = io_schedule_timeout(timeout_remain);
 		if (timeout_remain == 0) {
 			ret = -ETIME;
@@ -54,8 +54,8 @@ struct i915_params i915 __read_mostly = {
 	.verbose_state_checks = 1,
 	.nuclear_pageflip = 0,
 	.edp_vswing = 0,
-	.enable_guc_loading = -1,
-	.enable_guc_submission = -1,
+	.enable_guc_loading = 0,
+	.enable_guc_submission = 0,
 	.guc_log_level = -1,
 	.enable_dp_mst = true,
 	.inject_load_failure = 0,
@@ -203,12 +203,12 @@ MODULE_PARM_DESC(edp_vswing,
 module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
 MODULE_PARM_DESC(enable_guc_loading,
 	"Enable GuC firmware loading "
-	"(-1=auto [default], 0=never, 1=if available, 2=required)");
+	"(-1=auto, 0=never [default], 1=if available, 2=required)");
 
 module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
 MODULE_PARM_DESC(enable_guc_submission,
 	"Enable GuC submission "
-	"(-1=auto [default], 0=never, 1=if available, 2=required)");
+	"(-1=auto, 0=never [default], 1=if available, 2=required)");
 
 module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
 MODULE_PARM_DESC(guc_log_level,
@@ -1686,6 +1686,9 @@ enum skl_disp_power_wells {
 
 #define GEN7_TLB_RD_ADDR	_MMIO(0x4700)
 
+#define GEN9_GAMT_ECO_REG_RW_IA	_MMIO(0x4ab0)
+#define   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS	(1<<18)
+
 #define GAMT_CHKN_BIT_REG	_MMIO(0x4ab8)
 #define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING	(1<<28)
 
@@ -93,6 +93,15 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
 	if (!b->irq_enabled ||
 	    test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
 		mod_timer(&b->fake_irq, jiffies + 1);
+
+	/* Ensure that even if the GPU hangs, we get woken up.
+	 *
+	 * However, note that if no one is waiting, we never notice
+	 * a gpu hang. Eventually, we will have to wait for a resource
+	 * held by the GPU and so trigger a hangcheck. In the most
+	 * pathological case, this will be upon memory starvation!
+	 */
+	i915_queue_hangcheck(i915);
 }
 
 static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
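(Paired with the __i915_wait_request() hunk above: the hangcheck kick moves out of the per-iteration wait loop and into the breadcrumbs irq-enable path, so it is queued once, before the waiter first sleeps; hence the patch title "Queue hangcheck before sleeping".)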
@@ -329,10 +329,25 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 	struct drm_device *dev = connector->dev;
 	struct intel_crt *crt = intel_attached_crt(connector);
 	struct drm_i915_private *dev_priv = to_i915(dev);
+	bool reenable_hpd;
 	u32 adpa;
 	bool ret;
 	u32 save_adpa;
 
+	/*
+	 * Doing a force trigger causes a hpd interrupt to get sent, which can
+	 * get us stuck in a loop if we're polling:
+	 *  - We enable power wells and reset the ADPA
+	 *  - output_poll_exec does force probe on VGA, triggering a hpd
+	 *  - HPD handler waits for poll to unlock dev->mode_config.mutex
+	 *  - output_poll_exec shuts off the ADPA, unlocks
+	 *    dev->mode_config.mutex
+	 *  - HPD handler runs, resets ADPA and brings us back to the start
+	 *
+	 * Just disable HPD interrupts here to prevent this
+	 */
+	reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
+
 	save_adpa = adpa = I915_READ(crt->adpa_reg);
 	DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
@@ -357,6 +372,9 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
 
 	DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
 
+	if (reenable_hpd)
+		intel_hpd_enable(dev_priv, crt->base.hpd_pin);
+
 	return ret;
 }
 
@@ -717,11 +735,11 @@ static int intel_crt_set_property(struct drm_connector *connector,
 	return 0;
 }
 
-static void intel_crt_reset(struct drm_connector *connector)
+void intel_crt_reset(struct drm_encoder *encoder)
 {
-	struct drm_device *dev = connector->dev;
+	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_crt *crt = intel_attached_crt(connector);
+	struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
 
 	if (INTEL_INFO(dev)->gen >= 5) {
 		u32 adpa;
@@ -743,7 +761,6 @@ static void intel_crt_reset(struct drm_connector *connector)
 	 */
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
-	.reset = intel_crt_reset,
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = intel_crt_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
@@ -762,6 +779,7 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs
 };
 
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
+	.reset = intel_crt_reset,
 	.destroy = intel_encoder_destroy,
 };
 
@@ -904,5 +922,5 @@ void intel_crt_init(struct drm_device *dev)
 		dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
 	}
 
-	intel_crt_reset(connector);
+	intel_crt_reset(&crt->base.base);
 }
@@ -13924,8 +13924,50 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
 
 #undef for_each_intel_crtc_masked
 
+/*
+ * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
+ * drm_atomic_helper_legacy_gamma_set() directly.
+ */
+static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
+					 u16 *red, u16 *green, u16 *blue,
+					 uint32_t size)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_mode_config *config = &dev->mode_config;
+	struct drm_crtc_state *state;
+	int ret;
+
+	ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
+	if (ret)
+		return ret;
+
+	/*
+	 * Make sure we update the legacy properties so this works when
+	 * atomic is not enabled.
+	 */
+
+	state = crtc->state;
+
+	drm_object_property_set_value(&crtc->base,
+				      config->degamma_lut_property,
+				      (state->degamma_lut) ?
+				      state->degamma_lut->base.id : 0);
+
+	drm_object_property_set_value(&crtc->base,
+				      config->ctm_property,
+				      (state->ctm) ?
+				      state->ctm->base.id : 0);
+
+	drm_object_property_set_value(&crtc->base,
+				      config->gamma_lut_property,
+				      (state->gamma_lut) ?
+				      state->gamma_lut->base.id : 0);
+
+	return 0;
+}
+
 static const struct drm_crtc_funcs intel_crtc_funcs = {
-	.gamma_set = drm_atomic_helper_legacy_gamma_set,
+	.gamma_set = intel_atomic_legacy_gamma_set,
 	.set_config = drm_atomic_helper_set_config,
 	.set_property = drm_atomic_helper_crtc_set_property,
 	.destroy = intel_crtc_destroy,
@@ -1102,7 +1102,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
 
 /* intel_crt.c */
 void intel_crt_init(struct drm_device *dev);
-
+void intel_crt_reset(struct drm_encoder *encoder);
 
 /* intel_ddi.c */
 void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -1425,6 +1425,8 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
 
 /* intel_dvo.c */
 void intel_dvo_init(struct drm_device *dev);
+/* intel_hotplug.c */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
 
 
 /* legacy fbdev emulation in intel_fbdev.c */
@@ -452,31 +452,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
  *
  * This is a separate step from interrupt enabling to simplify the locking rules
  * in the driver load and resume code.
+ *
+ * Also see: intel_hpd_poll_init(), which enables connector polling
  */
 void intel_hpd_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_connector *connector;
 	int i;
 
 	for_each_hpd_pin(i) {
 		dev_priv->hotplug.stats[i].count = 0;
 		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
 	}
-	list_for_each_entry(connector, &mode_config->connector_list, head) {
-		struct intel_connector *intel_connector = to_intel_connector(connector);
-		connector->polled = intel_connector->polled;
-
-		/* MST has a dynamic intel_connector->encoder and it's reprobing
-		 * is all handled by the MST helpers. */
-		if (intel_connector->mst_port)
-			continue;
-
-		if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
-		    intel_connector->encoder->hpd_pin > HPD_NONE)
-			connector->polled = DRM_CONNECTOR_POLL_HPD;
-	}
+
+	WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
+	schedule_work(&dev_priv->hotplug.poll_init_work);
 
 	/*
 	 * Interrupt setup is already guaranteed to be single-threaded, this is
@@ -488,10 +477,86 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
 	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
+void i915_hpd_poll_init_work(struct work_struct *work) {
+	struct drm_i915_private *dev_priv =
+		container_of(work, struct drm_i915_private,
+			     hotplug.poll_init_work);
+	struct drm_device *dev = &dev_priv->drm;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+	bool enabled;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
+
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		struct intel_connector *intel_connector =
+			to_intel_connector(connector);
+		connector->polled = intel_connector->polled;
+
+		/* MST has a dynamic intel_connector->encoder and it's reprobing
+		 * is all handled by the MST helpers. */
+		if (intel_connector->mst_port)
+			continue;
+
+		if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
+		    intel_connector->encoder->hpd_pin > HPD_NONE) {
+			connector->polled = enabled ?
+				DRM_CONNECTOR_POLL_CONNECT |
+				DRM_CONNECTOR_POLL_DISCONNECT :
+				DRM_CONNECTOR_POLL_HPD;
+		}
+	}
+
+	if (enabled)
+		drm_kms_helper_poll_enable_locked(dev);
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	/*
+	 * We might have missed any hotplugs that happened while we were
+	 * in the middle of disabling polling
+	 */
+	if (!enabled)
+		drm_helper_hpd_irq_event(dev);
+}
+
+/**
+ * intel_hpd_poll_init - enables/disables polling for connectors with hpd
+ * @dev_priv: i915 device instance
+ * @enabled: Whether to enable or disable polling
+ *
+ * This function enables polling for all connectors, regardless of whether or
+ * not they support hotplug detection. Under certain conditions HPD may not be
+ * functional. On most Intel GPUs, this happens when we enter runtime suspend.
+ * On Valleyview and Cherryview systems, this also happens when we shut off all
+ * of the powerwells.
+ *
+ * Since this function can get called in contexts where we're already holding
+ * dev->mode_config.mutex, we do the actual hotplug enabling in a seperate
+ * worker.
+ *
+ * Also see: intel_hpd_init(), which restores hpd handling.
+ */
+void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
+{
+	WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
+
+	/*
+	 * We might already be holding dev->mode_config.mutex, so do this in a
+	 * seperate worker
+	 * As well, there's no issue if we race here since we always reschedule
+	 * this worker anyway
+	 */
+	schedule_work(&dev_priv->hotplug.poll_init_work);
+}
+
 void intel_hpd_init_work(struct drm_i915_private *dev_priv)
 {
 	INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
+	INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
 	INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
 			  intel_hpd_irq_storm_reenable_work);
 }
@@ -508,5 +573,33 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
 
 	cancel_work_sync(&dev_priv->hotplug.dig_port_work);
 	cancel_work_sync(&dev_priv->hotplug.hotplug_work);
+	cancel_work_sync(&dev_priv->hotplug.poll_init_work);
 	cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
 }
+
+bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+	bool ret = false;
+
+	if (pin == HPD_NONE)
+		return false;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
+		dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
+		ret = true;
+	}
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	return ret;
+}
+
+void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
+{
+	if (pin == HPD_NONE)
+		return;
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+	spin_unlock_irq(&dev_priv->irq_lock);
+}
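The two helpers above are what the valleyview_crt_detect_hotplug() hunk earlier relies on: intel_hpd_disable() reports whether the pin was actually in HPD_ENABLED state, so a caller only re-enables a pin it disabled itself and never clobbers a pin that storm handling has already shut off. A minimal caller sketch in C, mirroring that hunk (names taken from the diff above):

	bool reenable_hpd;

	/* Suppress HPD interrupts around the forced detect cycle. */
	reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);

	/* ... force-trigger hotplug detection via the ADPA register ... */

	/* Restore HPD only if this caller was the one that disabled it. */
	if (reenable_hpd)
		intel_hpd_enable(dev_priv, crt->base.hpd_pin);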
@@ -66,9 +66,10 @@ struct drm_i915_mocs_table {
 #define L3_WB			3
 
 /* Target cache */
-#define ELLC			0
-#define LLC			1
-#define LLC_ELLC		2
+#define LE_TC_PAGETABLE		0
+#define LE_TC_LLC		1
+#define LE_TC_LLC_ELLC		2
+#define LE_TC_LLC_ELLC_ALT	3
 
 /*
  * MOCS tables
@@ -96,34 +97,67 @@ struct drm_i915_mocs_table {
  * end.
  */
 static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
-	/* { 0x00000009, 0x0010 } */
-	{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
-	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
-	/* { 0x00000038, 0x0030 } */
-	{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
-	/* { 0x0000003b, 0x0030 } */
-	{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+	{ /* 0x00000009 */
+	  .control_value = LE_CACHEABILITY(LE_UC) |
+			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+			   LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+			   LE_PFM(0) | LE_SCF(0),
+
+	  /* 0x0010 */
+	  .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
+	},
+	{
+	  /* 0x00000038 */
+	  .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
+			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+			   LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+			   LE_PFM(0) | LE_SCF(0),
+	  /* 0x0030 */
+	  .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+	},
+	{
+	  /* 0x0000003b */
+	  .control_value = LE_CACHEABILITY(LE_WB) |
+			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+			   LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+			   LE_PFM(0) | LE_SCF(0),
+	  /* 0x0030 */
+	  .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+	},
 };
 
+/* NOTE: the LE_TGT_CACHE is not used on Broxton */
 static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
-	/* { 0x00000009, 0x0010 } */
-	{ (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) |
-	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) },
-	/* { 0x00000038, 0x0030 } */
-	{ (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) },
-	/* { 0x0000003b, 0x0030 } */
-	{ (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) |
-	   LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)),
-	  (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }
+	{
+	  /* 0x00000009 */
+	  .control_value = LE_CACHEABILITY(LE_UC) |
+			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+			   LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+			   LE_PFM(0) | LE_SCF(0),
+
+	  /* 0x0010 */
+	  .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
+	},
+	{
+	  /* 0x00000038 */
+	  .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
+			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+			   LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+			   LE_PFM(0) | LE_SCF(0),
+
+	  /* 0x0030 */
+	  .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+	},
+	{
+	  /* 0x00000039 */
+	  .control_value = LE_CACHEABILITY(LE_UC) |
+			   LE_TGT_CACHE(LE_TC_LLC_ELLC) |
+			   LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
+			   LE_PFM(0) | LE_SCF(0),
+
+	  /* 0x0030 */
+	  .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
+	},
 };
 
 /**
@@ -1109,6 +1109,11 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 	/* WaDisableGafsUnitClkGating:skl */
 	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
+	/* WaInPlaceDecompressionHang:skl */
+	if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
+		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
 	/* WaDisableLSQCROPERFforOCL:skl */
 	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
 	if (ret)
@@ -1178,6 +1183,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
+	/* WaInPlaceDecompressionHang:bxt */
+	if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+		WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+			   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
 	return 0;
 }
 
@@ -1225,6 +1235,10 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
 			  GEN7_HALF_SLICE_CHICKEN1,
 			  GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
+	/* WaInPlaceDecompressionHang:kbl */
+	WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+		   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
 	/* WaDisableLSQCROPERFforOCL:kbl */
 	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
 	if (ret)
@@ -1305,7 +1319,8 @@ static int init_render_ring(struct intel_engine_cs *engine)
 	if (IS_GEN(dev_priv, 6, 7))
 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
 
-	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+	if (INTEL_INFO(dev_priv)->gen >= 6)
+		I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 
 	return init_workarounds_ring(engine);
 }
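(The fix for "Unbreak interrupts on pre-gen6": engine->irq_keep_mask is only populated on gen6+, so on earlier parts the unconditional write of ~irq_keep_mask, with the keep mask left zero, programmed the IMR to mask every interrupt; guarding the write with a gen >= 6 check leaves pre-gen6 interrupt masking alone.)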
@@ -1078,6 +1078,7 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
 
 static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 {
+	struct intel_encoder *encoder;
 	enum pipe pipe;
 
 	/*
@@ -1113,6 +1114,12 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
 
 	intel_hpd_init(dev_priv);
 
+	/* Re-enable the ADPA, if we have one */
+	for_each_intel_encoder(&dev_priv->drm, encoder) {
+		if (encoder->type == INTEL_OUTPUT_ANALOG)
+			intel_crt_reset(&encoder->base);
+	}
+
 	i915_redisable_vga_power_on(&dev_priv->drm);
 }
 
@@ -1126,6 +1133,8 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
 	synchronize_irq(dev_priv->drm.irq);
 
 	intel_power_sequencer_reset(dev_priv);
+
+	intel_hpd_poll_init(dev_priv);
 }
 
 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,