Merge tag 'drm-intel-fixes-2014-04-25' of git://anongit.freedesktop.org/drm-intel into drm-next

Fix a DVI mode regression, spurious hotplug WARNs on gmch platforms, and a GM45 boot regression.

* tag 'drm-intel-fixes-2014-04-25' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: Move all ring resets before setting the HWS page
  drm/i915: Don't WARN nor handle unexpected hpd interrupts on gmch platforms
  drm/i915: Allow full PPGTT with param override
  drm/i915: Discard BIOS framebuffers too small to accommodate chosen mode
  drm/i915: get power domain in case the BIOS enabled eDP VDD
  drm/i915: Don't check gmch state on inherited configs
  drm/i915: Allow user modes to exceed DVI 165MHz limit
Dave Airlie 2014-04-28 09:14:54 +10:00
commit 6f19e7e5ae
10 changed files with 96 additions and 36 deletions

View File

@@ -50,7 +50,7 @@ bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 	/* Full ppgtt disabled by default for now due to issues. */
 	if (full)
-		return false; /* HAS_PPGTT(dev) */
+		return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
 	else
 		return HAS_ALIASING_PPGTT(dev);
 }
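
The practical effect: full PPGTT stays disabled by default and is only used when the user opts in with the i915.enable_ppgtt=2 module parameter on hardware that supports it. A minimal sketch of the resulting policy (illustrative only, not the exact driver code):

    /* Full PPGTT needs both hardware support and an explicit opt-in via
     * i915.enable_ppgtt=2; everything else keeps using aliasing PPGTT
     * where available. */
    static bool ppgtt_policy_sketch(struct drm_device *dev, bool full)
    {
            if (full)
                    return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
            return HAS_ALIASING_PPGTT(dev);
    }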

View File

@@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
 	spin_lock(&dev_priv->irq_lock);
 	for (i = 1; i < HPD_NUM_PINS; i++) {
 
-		WARN_ONCE(hpd[i] & hotplug_trigger &&
-			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
-			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
-			  hotplug_trigger, i, hpd[i]);
+		if (hpd[i] & hotplug_trigger &&
+		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
+			/*
+			 * On GMCH platforms the interrupt mask bits only
+			 * prevent irq generation, not the setting of the
+			 * hotplug bits itself. So only WARN about unexpected
+			 * interrupts on saner platforms.
+			 */
+			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+				  hotplug_trigger, i, hpd[i]);
+			continue;
+		}
 
 		if (!(hpd[i] & hotplug_trigger) ||
 		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)

View File

@@ -827,6 +827,7 @@ enum punit_power_well {
 # define MI_FLUSH_ENABLE		(1 << 12)
 # define ASYNC_FLIP_PERF_DISABLE	(1 << 14)
 # define MODE_IDLE			(1 << 9)
+# define STOP_RING			(1 << 8)
 
 #define GEN6_GT_MODE	0x20d0
 #define GEN7_GT_MODE	0x7008
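
STOP_RING is a bit in the per-ring MI_MODE register, which uses the i915 masked-bit convention: the upper 16 bits of a write select which bits actually change, so a single bit can be set or cleared without a read-modify-write. The helper macros used for this are, roughly (as defined elsewhere in i915_reg.h):

    /* Upper half of the write is the mask, lower half the value: only the
     * named bit is touched by the MMIO write. */
    #define _MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
    #define _MASKED_BIT_DISABLE(a)	((a) << 16)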

View File

@@ -9654,11 +9654,22 @@ intel_pipe_config_compare(struct drm_device *dev,
 	PIPE_CONF_CHECK_I(pipe_src_w);
 	PIPE_CONF_CHECK_I(pipe_src_h);
 
-	PIPE_CONF_CHECK_I(gmch_pfit.control);
-	/* pfit ratios are autocomputed by the hw on gen4+ */
-	if (INTEL_INFO(dev)->gen < 4)
-		PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-	PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+	/*
+	 * FIXME: BIOS likes to set up a cloned config with lvds+external
+	 * screen. Since we don't yet re-compute the pipe config when moving
+	 * just the lvds port away to another pipe the sw tracking won't match.
+	 *
+	 * Proper atomic modesets with recomputed global state will fix this.
+	 * Until then just don't check gmch state for inherited modes.
+	 */
+	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+		PIPE_CONF_CHECK_I(gmch_pfit.control);
+		/* pfit ratios are autocomputed by the hw on gen4+ */
+		if (INTEL_INFO(dev)->gen < 4)
+			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+	}
+
 	PIPE_CONF_CHECK_I(pch_pfit.enabled);
 	if (current_config->pch_pfit.enabled) {
 		PIPE_CONF_CHECK_I(pch_pfit.pos);
@@ -11616,6 +11627,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
 			    base.head) {
 		memset(&crtc->config, 0, sizeof(crtc->config));
 
+		crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
 		crtc->active = dev_priv->display.get_pipe_config(crtc,
 								 &crtc->config);
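
For reference, PIPE_CONF_CHECK_I compares one field of the software-tracked pipe config against the state read back from the hardware and fails the compare on mismatch; a simplified sketch of the macro (the real one in intel_display.c also prints the expected and found values):

    /* Simplified: abort intel_pipe_config_compare() as soon as the tracked
     * value and the value read back from hardware disagree. */
    #define PIPE_CONF_CHECK_I(name) \
            if (current_config->name != pipe_config->name) { \
                    DRM_ERROR("mismatch in " #name "\n"); \
                    return false; \
            }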

View File

@@ -3619,7 +3619,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 {
 	struct drm_connector *connector = &intel_connector->base;
 	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-	struct drm_device *dev = intel_dig_port->base.base.dev;
+	struct intel_encoder *intel_encoder = &intel_dig_port->base;
+	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_display_mode *fixed_mode = NULL;
 	bool has_dpcd;
@@ -3629,6 +3630,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 	if (!is_edp(intel_dp))
 		return true;
 
+	/* The VDD bit needs a power domain reference, so if the bit is already
+	 * enabled when we boot, grab this reference. */
+	if (edp_have_panel_vdd(intel_dp)) {
+		enum intel_display_power_domain power_domain;
+		power_domain = intel_display_port_power_domain(intel_encoder);
+		intel_display_power_get(dev_priv, power_domain);
+	}
+
 	/* Cache DPCD and EDID for edp. */
 	intel_edp_panel_vdd_on(intel_dp);
 	has_dpcd = intel_dp_get_dpcd(intel_dp);
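
The reference taken above keeps the panel's power domain powered for as long as the BIOS-enabled VDD stays on; the matching put is assumed to happen once the driver itself disables VDD again. A sketch of the intended pairing (illustrative, not the exact call sites):

    /* Boot with VDD already on: take a power domain reference now ... */
    power_domain = intel_display_port_power_domain(intel_encoder);
    intel_display_power_get(dev_priv, power_domain);
    /* ... and drop it again when VDD is finally switched off. */
    intel_display_power_put(dev_priv, power_domain);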

View File

@@ -236,7 +236,8 @@ struct intel_crtc_config {
 	 * tracked with quirk flags so that fastboot and state checker can act
 	 * accordingly.
 	 */
 #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE (1<<1) /* mode inherited from firmware */
 	unsigned long quirks;
 
 	/* User requested mode, only valid as a starting point to

View File

@@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
 	mutex_lock(&dev->struct_mutex);
 
+	if (intel_fb &&
+	    (sizes->fb_width > intel_fb->base.width ||
+	     sizes->fb_height > intel_fb->base.height)) {
+		DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+			      " releasing it\n",
+			      intel_fb->base.width, intel_fb->base.height,
+			      sizes->fb_width, sizes->fb_height);
+		drm_framebuffer_unreference(&intel_fb->base);
+		intel_fb = ifbdev->fb = NULL;
+	}
 	if (!intel_fb || WARN_ON(!intel_fb->obj)) {
 		DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
 		ret = intelfb_alloc(helper, sizes);

View File

@@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
 	}
 }
 
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 {
 	struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
-	if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+	if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
 		return 165000;
 	else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
 		return 300000;
@@ -837,7 +837,8 @@ static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
 		      struct drm_display_mode *mode)
 {
-	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+	if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+					       true))
 		return MODE_CLOCK_HIGH;
 	if (mode->clock < 20000)
 		return MODE_CLOCK_LOW;
@@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
 	int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
-	int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+	int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
 	int desired_bpp;
 
 	if (intel_hdmi->color_range_auto) {
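
The net effect: mode probing still filters modes on DVI (non-HDMI) sinks at the 165MHz single-link limit, while an explicitly requested user mode is only clamped by the platform's maximum port clock. Illustration of the two call sites on a DVI sink (numbers are examples):

    /* Sketch: same helper, two policies, for a sink without HDMI support. */
    int probe_limit   = hdmi_portclock_limit(intel_hdmi, true);  /* 165000: filter EDID modes */
    int modeset_limit = hdmi_portclock_limit(intel_hdmi, false); /* platform max, e.g. 225000 or 300000 */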

View File

@@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
 	I915_WRITE(HWS_PGA, addr);
 }
 
+static bool stop_ring(struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+
+	if (!IS_GEN2(ring->dev)) {
+		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+		if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+			DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+			return false;
+		}
+	}
+
+	I915_WRITE_CTL(ring, 0);
+	I915_WRITE_HEAD(ring, 0);
+	ring->write_tail(ring, 0);
+
+	if (!IS_GEN2(ring->dev)) {
+		(void)I915_READ_CTL(ring);
+		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+	}
+
+	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
+
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj = ring->obj;
 	int ret = 0;
-	u32 head;
 
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
-	/* Stop the ring if it's running. */
-	I915_WRITE_CTL(ring, 0);
-	I915_WRITE_HEAD(ring, 0);
-	ring->write_tail(ring, 0);
-	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
-		DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
-
-	if (I915_NEED_GFX_HWS(dev))
-		intel_ring_setup_status_page(ring);
-	else
-		ring_setup_phys_status_page(ring);
-
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
-
-	/* G45 ring initialization fails to reset head to zero */
-	if (head != 0) {
+	if (!stop_ring(ring)) {
+		/* G45 ring initialization often fails to reset head to zero */
 		DRM_DEBUG_KMS("%s head not reset to zero "
 			      "ctl %08x head %08x tail %08x start %08x\n",
 			      ring->name,
@@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 			      I915_READ_TAIL(ring),
 			      I915_READ_START(ring));
 
-		I915_WRITE_HEAD(ring, 0);
-
-		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+		if (!stop_ring(ring)) {
 			DRM_ERROR("failed to set %s head to zero "
 				  "ctl %08x head %08x tail %08x start %08x\n",
 				  ring->name,
@@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 				  I915_READ_HEAD(ring),
 				  I915_READ_TAIL(ring),
 				  I915_READ_START(ring));
+			ret = -EIO;
+			goto out;
 		}
 	}
 
+	if (I915_NEED_GFX_HWS(dev))
+		intel_ring_setup_status_page(ring);
+	else
+		ring_setup_phys_status_page(ring);
+
 	/* Initialize the ring. This must happen _after_ we've cleared the ring
 	 * registers with the above sequence (the readback of the HEAD registers
 	 * also enforces ordering), otherwise the hw might lose the new ring
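
In short, the boot fix is an ordering change: the ring is stopped and its registers cleared (with one retry) before the hardware status page is programmed, rather than in between the reset attempts. A condensed sketch of the resulting init_ring_common() flow (forcewake release and error paths elided):

    /* Sketch of the new ordering only, not the full function. */
    gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);

    if (!stop_ring(ring) && !stop_ring(ring))  /* reset first, retry once */
            return -EIO;

    if (I915_NEED_GFX_HWS(dev))                /* only then set up the status page */
            intel_ring_setup_status_page(ring);
    else
            ring_setup_phys_status_page(ring);

    /* ... then program START/CTL to (re)start the ring. */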

View File

@@ -34,6 +34,7 @@ struct intel_hw_status_page {
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
 
 enum intel_ring_hangcheck_action {
 	HANGCHECK_IDLE = 0,