Merge tag 'drm-intel-next-fixes-2017-09-07' of git://anongit.freedesktop.org/git/drm-intel
Pull i915 drm fixes from Rodrigo Vivi:
 "Since Dave is on paternity leave we are sending drm/i915 fixes for
  v4.14-rc1 directly to you, as he had asked us to do.

  The most critical ones are the GPU reset fix for gen2-4 and a GVT fix
  for a regression that was blocking gvt init from working on your tree.

  The rest are general fixes for patches coming from drm-next"

Acked-by: Dave Airlie <airlied@redhat.com>

* tag 'drm-intel-next-fixes-2017-09-07' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: Re-enable GTT following a device reset
  drm/i915: Annotate user relocs with __user
  drm/i915: Silence sparse by using gfp_t
  drm/i915: Add __rcu to radix tree slot pointer
  drm/i915: Fix the missing PPAT cache attributes on CNL
  drm/i915/gvt: Remove one duplicated MMIO
  drm/i915: Fix enum pipe vs. enum transcoder for the PCH transcoder
  drm/i915: Make i2c lock ops static
  drm/i915: Make i9xx_load_ycbcr_conversion_matrix() static
  drm/i915/edp: Increase T12 panel delay to 900 ms to fix DP AUX CH timeouts
  drm/i915: Ignore duplicate VMA stored within the per-object handle LUT
  drm/i915: Skip fence alignment check for the CCS plane
  drm/i915: Treat fb->offsets[] as a raw byte offset instead of a linear offset
  drm/i915: Always wake the device to flush the GTT
  drm/i915: Recreate vmapping even when the object is pinned
  drm/i915: Quietly cancel FBC activation if CRTC is turned off before worker
commit 7d95565612
@@ -2659,7 +2659,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_D(HSW_PWR_WELL_CTL_BIOS(SKL_DISP_PW_MISC_IO), D_SKL_PLUS);
 	MMIO_DH(HSW_PWR_WELL_CTL_DRIVER(SKL_DISP_PW_MISC_IO), D_SKL_PLUS, NULL,
 		skl_power_well_ctl_write);
-	MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);
 
 	MMIO_D(0xa210, D_SKL_PLUS);
 	MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -1073,7 +1073,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 			goto unpin_src;
 	}
 
-	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
+	dst = i915_gem_object_pin_map(dst_obj, I915_MAP_FORCE_WB);
 	if (IS_ERR(dst))
 		goto unpin_dst;
 
@@ -1891,9 +1891,15 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 
 	/*
 	 * Everything depends on having the GTT running, so we need to start
-	 * there. Fortunately we don't need to do this unless we reset the
-	 * chip at a PCI level.
-	 *
+	 * there.
+	 */
+	ret = i915_ggtt_enable_hw(i915);
+	if (ret) {
+		DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret);
+		goto error;
+	}
+
+	/*
 	 * Next we need to restore the context, but we don't use those
 	 * yet either...
 	 *
@@ -3479,6 +3479,9 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);
 enum i915_map_type {
 	I915_MAP_WB = 0,
 	I915_MAP_WC,
+#define I915_MAP_OVERRIDE BIT(31)
+	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
+	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
 };
 
 /**
@@ -695,12 +695,11 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
 	switch (obj->base.write_domain) {
 	case I915_GEM_DOMAIN_GTT:
 		if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
-			if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+			intel_runtime_pm_get(dev_priv);
 			spin_lock_irq(&dev_priv->uncore.lock);
 			POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
 			spin_unlock_irq(&dev_priv->uncore.lock);
 			intel_runtime_pm_put(dev_priv);
-			}
 		}
 
 		intel_fb_obj_flush(obj,
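Note on the hunk above: the conditional intel_runtime_pm_get_if_in_use() is replaced by an unconditional intel_runtime_pm_get(), so the posting read that flushes GTT writes is issued even when the device had gone idle instead of being silently skipped. A minimal user-space sketch of that difference, with toy names standing in for the runtime-PM API (an illustration, not the kernel implementation):

/* toy_rpm.c - contrast of the two acquisition styles; illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
	int usage;        /* runtime-PM usage count */
	bool suspended;
};

/* Unconditional get: resume the device if needed, then hold a reference. */
static void toy_pm_get(struct toy_dev *d)
{
	if (d->suspended) {
		d->suspended = false;
		printf("device resumed\n");
	}
	d->usage++;
}

/* Conditional get: only succeeds when the device is already awake. */
static bool toy_pm_get_if_in_use(struct toy_dev *d)
{
	if (d->suspended)
		return false;
	d->usage++;
	return true;
}

static void toy_pm_put(struct toy_dev *d)
{
	if (--d->usage == 0)
		d->suspended = true;   /* pretend autosuspend kicks in */
}

int main(void)
{
	struct toy_dev d = { .usage = 0, .suspended = true };

	/* Old behaviour: the flush is skipped while the device sleeps. */
	if (toy_pm_get_if_in_use(&d)) {
		printf("flush issued\n");
		toy_pm_put(&d);
	} else {
		printf("device asleep, flush skipped\n");
	}

	/* New behaviour: always wake the device so the flush happens. */
	toy_pm_get(&d);
	printf("flush issued with device awake\n");
	toy_pm_put(&d);
	return 0;
}

The trade-off is a possible device wake-up on this path, in exchange for the GTT flush never being lost.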
@@ -2213,7 +2212,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 {
 	struct radix_tree_iter iter;
-	void **slot;
+	void __rcu **slot;
 
 	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
 		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
@@ -2553,6 +2552,9 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(i != n_pages);
 
 	switch (type) {
+	default:
+		MISSING_CASE(type);
+		/* fallthrough to use PAGE_KERNEL anyway */
 	case I915_MAP_WB:
 		pgprot = PAGE_KERNEL;
 		break;
@@ -2583,7 +2585,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ERR_PTR(ret);
 
-	pinned = true;
+	pinned = !(type & I915_MAP_OVERRIDE);
+	type &= ~I915_MAP_OVERRIDE;
+
 	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
 		if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
 			ret = ____i915_gem_object_get_pages(obj);
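Note on the two i915_map hunks above (the I915_MAP_OVERRIDE enum bits and this pin_map change): bit 31 acts as an out-of-band "force" flag, so I915_MAP_FORCE_WB/WC ask for any existing vmapping to be dropped and rebuilt with the requested caching mode, while plain I915_MAP_WB/WC keep their old meaning. A small standalone sketch of the decode step, with simplified names (the real logic lives in i915_gem_object_pin_map()):

/* toy_map_override.c - decoding a "force" bit carried on top of a map type. */
#include <stdbool.h>
#include <stdio.h>

#define MAP_WB        0u
#define MAP_WC        1u
#define MAP_OVERRIDE  (1u << 31)                 /* out-of-band "force" flag */
#define MAP_FORCE_WB  (MAP_WB | MAP_OVERRIDE)
#define MAP_FORCE_WC  (MAP_WC | MAP_OVERRIDE)

static void pin_map(unsigned int type)
{
	/* A forced request is treated as if the object were not already
	 * mapped, so a cached mapping of a different type can be dropped
	 * and rebuilt instead of the call failing. */
	bool reuse_existing = !(type & MAP_OVERRIDE);

	type &= ~MAP_OVERRIDE;                   /* keep only the base type */
	printf("base type %u, may reuse existing mapping: %s\n",
	       type, reuse_existing ? "yes" : "no");
}

int main(void)
{
	pin_map(MAP_WB);        /* base type 0, may reuse: yes */
	pin_map(MAP_FORCE_WB);  /* base type 0, may reuse: no  */
	return 0;
}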
@@ -3258,7 +3262,13 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 
 		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
 
-		if (!i915_vma_is_ggtt(vma))
+		GEM_BUG_ON(vma->obj != obj);
+
+		/* We allow the process to have multiple handles to the same
+		 * vma, in the same fd namespace, by virtue of flink/open.
+		 */
+		GEM_BUG_ON(!vma->open_count);
+		if (!--vma->open_count && !i915_vma_is_ggtt(vma))
 			i915_vma_close(vma);
 
 		list_del(&lut->obj_link);
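Note on the hunk above (together with the open_count field and the increment in eb_lookup_vmas() later in this series): flink/open lets one process hold several handles to the same object in the same context, and all of them resolve to the same VMA in the handle LUT, so the VMA may only be closed once the last such handle is gone. A toy model of that alias counting, not the driver's data structures:

/* toy_open_count.c - per-vma alias counting for duplicate handles. */
#include <assert.h>
#include <stdio.h>

struct toy_vma {
	unsigned int open_count;   /* handles in one context aliasing this vma */
};

/* A handle referencing the vma is added to the context's lookup table. */
static void handle_open(struct toy_vma *vma)
{
	vma->open_count++;
}

/* A handle goes away; only the last alias actually closes the vma. */
static void handle_close(struct toy_vma *vma)
{
	assert(vma->open_count);     /* mirrors GEM_BUG_ON(!vma->open_count) */
	if (!--vma->open_count)
		printf("last handle gone, closing vma\n");
	else
		printf("%u alias(es) remain, vma kept\n", vma->open_count);
}

int main(void)
{
	struct toy_vma vma = { 0 };

	handle_open(&vma);    /* original handle */
	handle_open(&vma);    /* duplicate handle created via flink/open */

	handle_close(&vma);   /* 1 alias remains, vma kept */
	handle_close(&vma);   /* last handle gone, closing vma */
	return 0;
}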
@@ -285,7 +285,7 @@ static int eb_create(struct i915_execbuffer *eb)
 		 * direct lookup.
 		 */
 		do {
-			unsigned int flags;
+			gfp_t flags;
 
 			/* While we can still reduce the allocation size, don't
 			 * raise a warning and allow the allocation to fail.
@@ -720,6 +720,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 			goto err_obj;
 		}
 
+		vma->open_count++;
 		list_add(&lut->obj_link, &obj->lut_list);
 		list_add(&lut->ctx_link, &eb->ctx->handles_list);
 		lut->ctx = eb->ctx;
@@ -1070,7 +1071,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		return PTR_ERR(obj);
 
 	cmd = i915_gem_object_pin_map(obj,
-				      cache->has_llc ? I915_MAP_WB : I915_MAP_WC);
+				      cache->has_llc ?
+				      I915_MAP_FORCE_WB :
+				      I915_MAP_FORCE_WC);
 	i915_gem_object_unpin_pages(obj);
 	if (IS_ERR(cmd))
 		return PTR_ERR(cmd);
@@ -1526,7 +1529,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
 			min_t(u64, BIT_ULL(31), size - copied);
 
 		if (__copy_from_user((char *)relocs + copied,
-				     (char *)urelocs + copied,
+				     (char __user *)urelocs + copied,
 				     len)) {
 			kvfree(relocs);
 			err = -EFAULT;
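Note on the gfp_t and __user hunks in this group: both exist to keep sparse, the kernel's static checker, quiet. gfp_t is a __bitwise type that may not be mixed with plain integers, and __user marks pointers into the user address space that must never be dereferenced directly; casting a __user pointer to plain (char *) silently drops that annotation, which is what the relocation-copy hunk above fixes. A rough standalone sketch of what the annotation buys (the attribute spelling follows the kernel's compiler headers of that era, included here as an assumption):

/* toy_user_annotation.c - what the __user marker buys under sparse. */
#include <stddef.h>
#include <string.h>

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

/* Stand-in for copy_from_user(): the sanctioned way to read a __user
 * pointer. The real helper validates the range and handles faults. */
static unsigned long toy_copy_from_user(void *to, const void __user *from,
					size_t n)
{
	memcpy(to, (const void *)from, n);   /* toy: no fault handling */
	return 0;                            /* 0 bytes left uncopied */
}

int main(void)
{
	char dst[4];
	const char __user *src = (const char __user *)"abc";

	/* Dereferencing src directly would be flagged by sparse; going
	 * through the copy helper, with a cast that keeps the annotation
	 * as in the hunk above, is clean. */
	return (int)toy_copy_from_user(dst, src, sizeof(dst));
}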
@@ -2754,10 +2754,10 @@ static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv)
 	I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
 	I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
 	I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC);
-	I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
-	I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
-	I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
-	I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
+	I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
+	I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
+	I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
+	I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
 }
 
 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
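Note on the PPAT hunk above: the four entries gain GEN8_PPAT_WB, the write-back memory type. A small sketch of why that matters, under the assumption (from the driver's register definitions) that the memory type sits in the low bits of each PPAT entry and that a value of 0 encodes uncached; all macro values below are assumptions for illustration, not the hardware documentation:

/* toy_ppat.c - why leaving out the WB type bits matters, under the assumed
 * encoding described above. */
#include <stdio.h>

#define PPAT_TYPE_MASK  0x3          /* assumed: memory type in the low bits */
#define PPAT_UC         0x0          /* assumed: 0 encodes uncached */
#define PPAT_WB         0x3          /* assumed: 3 encodes write-back */
#define PPAT_LLCELLC    (2 << 2)     /* assumed target-cache field */
#define PPAT_AGE(x)     ((x) << 4)   /* assumed age field */

static const char *type_name(unsigned int entry)
{
	return (entry & PPAT_TYPE_MASK) == PPAT_WB ? "write-back" : "uncached/other";
}

int main(void)
{
	unsigned int before = PPAT_LLCELLC | PPAT_AGE(3);           /* type bits left at 0 */
	unsigned int after  = PPAT_WB | PPAT_LLCELLC | PPAT_AGE(3); /* what the fix writes */

	printf("before the fix: %s\n", type_name(before));
	printf("after the fix:  %s\n", type_name(after));
	return 0;
}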
@@ -38,7 +38,7 @@ TRACE_EVENT(intel_cpu_fifo_underrun,
 );
 
 TRACE_EVENT(intel_pch_fifo_underrun,
-	    TP_PROTO(struct drm_i915_private *dev_priv, enum transcoder pch_transcoder),
+	    TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pch_transcoder),
 	    TP_ARGS(dev_priv, pch_transcoder),
 
 	    TP_STRUCT__entry(
@@ -48,7 +48,7 @@ TRACE_EVENT(intel_pch_fifo_underrun,
 		    ),
 
 	    TP_fast_assign(
-			   enum pipe pipe = (enum pipe)pch_transcoder;
+			   enum pipe pipe = pch_transcoder;
 			   __entry->pipe = pipe;
 			   __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
 			   __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
@@ -59,6 +59,12 @@ struct i915_vma {
 	u32 fence_size;
 	u32 fence_alignment;
 
+	/**
+	 * Count of the number of times this vma has been opened by different
+	 * handles (but same file) for execbuf, i.e. the number of aliases
+	 * that exist in the ctx->handle_vmas LUT for this vma.
+	 */
+	unsigned int open_count;
 	unsigned int flags;
 	/**
 	 * How many users have pinned this object in GTT space. The following
@@ -107,7 +107,7 @@ static void ctm_mult_by_limited(uint64_t *result, int64_t *input)
 	}
 }
 
-void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
+static void i9xx_load_ycbcr_conversion_matrix(struct intel_crtc *intel_crtc)
 {
 	int pipe = intel_crtc->pipe;
 	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
@@ -2288,17 +2288,13 @@ void intel_add_fb_offsets(int *x, int *y,
 	}
 }
 
-/*
- * Input tile dimensions and pitch must already be
- * rotated to match x and y, and in pixel units.
- */
-static u32 _intel_adjust_tile_offset(int *x, int *y,
-				     unsigned int tile_width,
-				     unsigned int tile_height,
-				     unsigned int tile_size,
-				     unsigned int pitch_tiles,
-				     u32 old_offset,
-				     u32 new_offset)
+static u32 __intel_adjust_tile_offset(int *x, int *y,
+				      unsigned int tile_width,
+				      unsigned int tile_height,
+				      unsigned int tile_size,
+				      unsigned int pitch_tiles,
+				      u32 old_offset,
+				      u32 new_offset)
 {
 	unsigned int pitch_pixels = pitch_tiles * tile_width;
 	unsigned int tiles;
@@ -2319,18 +2315,13 @@ static u32 _intel_adjust_tile_offset(int *x, int *y,
 	return new_offset;
 }
 
-/*
- * Adjust the tile offset by moving the difference into
- * the x/y offsets.
- */
-static u32 intel_adjust_tile_offset(int *x, int *y,
-				    const struct intel_plane_state *state, int plane,
-				    u32 old_offset, u32 new_offset)
+static u32 _intel_adjust_tile_offset(int *x, int *y,
+				     const struct drm_framebuffer *fb, int plane,
+				     unsigned int rotation,
+				     u32 old_offset, u32 new_offset)
 {
-	const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev);
-	const struct drm_framebuffer *fb = state->base.fb;
+	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
 	unsigned int cpp = fb->format->cpp[plane];
-	unsigned int rotation = state->base.rotation;
 	unsigned int pitch = intel_fb_pitch(fb, plane, rotation);
 
 	WARN_ON(new_offset > old_offset);
@@ -2349,9 +2340,9 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
 		pitch_tiles = pitch / (tile_width * cpp);
 	}
 
-		_intel_adjust_tile_offset(x, y, tile_width, tile_height,
+		__intel_adjust_tile_offset(x, y, tile_width, tile_height,
 					  tile_size, pitch_tiles,
 					  old_offset, new_offset);
 	} else {
 		old_offset += *y * pitch + *x * cpp;
 
@@ -2362,6 +2353,19 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
 	return new_offset;
 }
 
+/*
+ * Adjust the tile offset by moving the difference into
+ * the x/y offsets.
+ */
+static u32 intel_adjust_tile_offset(int *x, int *y,
+				    const struct intel_plane_state *state, int plane,
+				    u32 old_offset, u32 new_offset)
+{
+	return _intel_adjust_tile_offset(x, y, state->base.fb, plane,
+					 state->base.rotation,
+					 old_offset, new_offset);
+}
+
 /*
  * Computes the linear offset to the base tile and adjusts
  * x, y. bytes per pixel is assumed to be a power-of-two.
@@ -2413,9 +2417,9 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
 		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
 		offset_aligned = offset & ~alignment;
 
-		_intel_adjust_tile_offset(x, y, tile_width, tile_height,
+		__intel_adjust_tile_offset(x, y, tile_width, tile_height,
 					  tile_size, pitch_tiles,
 					  offset, offset_aligned);
 	} else {
 		offset = *y * pitch + *x * cpp;
 		offset_aligned = offset & ~alignment;
@@ -2447,16 +2451,24 @@ u32 intel_compute_tile_offset(int *x, int *y,
 					  rotation, alignment);
 }
 
-/* Convert the fb->offset[] linear offset into x/y offsets */
-static void intel_fb_offset_to_xy(int *x, int *y,
+/* Convert the fb->offset[] into x/y offsets */
+static int intel_fb_offset_to_xy(int *x, int *y,
 				  const struct drm_framebuffer *fb, int plane)
 {
-	unsigned int cpp = fb->format->cpp[plane];
-	unsigned int pitch = fb->pitches[plane];
-	u32 linear_offset = fb->offsets[plane];
+	struct drm_i915_private *dev_priv = to_i915(fb->dev);
 
-	*y = linear_offset / pitch;
-	*x = linear_offset % pitch / cpp;
+	if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
+	    fb->offsets[plane] % intel_tile_size(dev_priv))
+		return -EINVAL;
+
+	*x = 0;
+	*y = 0;
+
+	_intel_adjust_tile_offset(x, y,
+				  fb, plane, DRM_MODE_ROTATE_0,
+				  fb->offsets[plane], 0);
+
+	return 0;
 }
 
 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier)
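Note on the rewritten intel_fb_offset_to_xy() above: fb->offsets[] is now treated as a raw byte offset. For non-linear modifiers it must be a multiple of the tile size (otherwise -EINVAL), and the whole offset is then folded into the x/y coordinates by the tile-walking helper instead of being divided by the pitch. A standalone sketch of that fold, using the same basic arithmetic as __intel_adjust_tile_offset() for the tiled case (the real helper additionally wraps x back into the pitch afterwards); the geometry values in main() are made up for the example:

/* toy_tile_offset.c - folding a tile-aligned byte offset into x/y. */
#include <stdio.h>

static void fold_offset_into_xy(int *x, int *y,
				unsigned int tile_width,   /* pixels */
				unsigned int tile_height,  /* rows */
				unsigned int tile_size,    /* bytes */
				unsigned int pitch_tiles,  /* fb pitch in tiles */
				unsigned int old_offset,   /* byte offset to fold */
				unsigned int new_offset)   /* byte offset to keep */
{
	unsigned int tiles = (old_offset - new_offset) / tile_size;

	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;
}

int main(void)
{
	/* X-tile-like geometry: 4 KiB tiles of 128x8 pixels at 4 bytes per
	 * pixel, with a framebuffer pitch of 8 tiles. */
	int x = 0, y = 0;

	fold_offset_into_xy(&x, &y, 128, 8, 4096, 8, 10 * 4096, 0);
	printf("fb->offsets[] of 10 tiles -> x=%d y=%d\n", x, y); /* x=256 y=8 */
	return 0;
}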
@@ -2523,12 +2535,18 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 		unsigned int cpp, size;
 		u32 offset;
 		int x, y;
+		int ret;
 
 		cpp = fb->format->cpp[i];
 		width = drm_framebuffer_plane_width(fb->width, fb, i);
 		height = drm_framebuffer_plane_height(fb->height, fb, i);
 
-		intel_fb_offset_to_xy(&x, &y, fb, i);
+		ret = intel_fb_offset_to_xy(&x, &y, fb, i);
+		if (ret) {
+			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
+				      i, fb->offsets[i]);
+			return ret;
+		}
 
 		if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
 		     fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) {
@@ -2539,11 +2557,13 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 			int ccs_x, ccs_y;
 
 			intel_tile_dims(fb, i, &tile_width, &tile_height);
+			tile_width *= hsub;
+			tile_height *= vsub;
 
-			ccs_x = (x * hsub) % (tile_width * hsub);
-			ccs_y = (y * vsub) % (tile_height * vsub);
-			main_x = intel_fb->normal[0].x % (tile_width * hsub);
-			main_y = intel_fb->normal[0].y % (tile_height * vsub);
+			ccs_x = (x * hsub) % tile_width;
+			ccs_y = (y * vsub) % tile_height;
+			main_x = intel_fb->normal[0].x % tile_width;
+			main_y = intel_fb->normal[0].y % tile_height;
 
 			/*
 			 * CCS doesn't have its own x/y offset register, so the intra CCS tile
@@ -2569,7 +2589,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 		 * fb layout agrees with the fence layout. We already check that the
 		 * fb stride matches the fence stride elsewhere.
 		 */
-		if (i915_gem_object_is_tiled(intel_fb->obj) &&
+		if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) &&
 		    (x + width) * cpp > fb->pitches[i]) {
 			DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
 				      i, fb->offsets[i]);
@@ -2632,10 +2652,10 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 			 * We only keep the x/y offsets, so push all of the
 			 * gtt offset into the x/y offsets.
 			 */
-			_intel_adjust_tile_offset(&x, &y,
+			__intel_adjust_tile_offset(&x, &y,
 						  tile_width, tile_height,
 						  tile_size, pitch_tiles,
 						  gtt_offset_rotated * tile_size, 0);
 
 			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
 
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
 	 * seems sufficient to avoid this problem.
 	 */
 	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
-		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 800 * 10);
+		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10);
 		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
 			      vbt.t11_t12);
 	}
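Note on the value above: the panel power sequencer T11/T12 field is presumably kept in 100 us units, hence the "* 10" applied to a millisecond value; 900 ms x 10 = 9000 units, up from 800 ms x 10 = 8000, which matches the "900 ms" in the patch title.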
@@ -406,9 +406,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
 		struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
 
 		if (drm_crtc_vblank_get(&crtc->base)) {
-			DRM_ERROR("vblank not available for FBC on pipe %c\n",
-				  pipe_name(crtc->pipe));
-
+			/* CRTC is now off, leave FBC deactivated */
 			mutex_lock(&fbc->lock);
 			work->scheduled = false;
 			mutex_unlock(&fbc->lock);
@@ -187,11 +187,11 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
 }
 
 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
-					    enum transcoder pch_transcoder,
+					    enum pipe pch_transcoder,
 					    bool enable)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+	uint32_t bit = (pch_transcoder == PIPE_A) ?
 		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
 
 	if (enable)
@@ -203,7 +203,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
 static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
 {
 	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	enum transcoder pch_transcoder = (enum transcoder) crtc->pipe;
+	enum pipe pch_transcoder = crtc->pipe;
 	uint32_t serr_int = I915_READ(SERR_INT);
 
 	lockdep_assert_held(&dev_priv->irq_lock);
@@ -215,12 +215,12 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
 	POSTING_READ(SERR_INT);
 
 	trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
-	DRM_ERROR("pch fifo underrun on pch transcoder %s\n",
-		  transcoder_name(pch_transcoder));
+	DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
+		  pipe_name(pch_transcoder));
 }
 
 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
-					    enum transcoder pch_transcoder,
+					    enum pipe pch_transcoder,
 					    bool enable, bool old)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -238,8 +238,8 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
 
 		if (old && I915_READ(SERR_INT) &
 		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
-			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %s\n",
-				  transcoder_name(pch_transcoder));
+			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
+				  pipe_name(pch_transcoder));
 		}
 	}
 }
@@ -395,8 +395,8 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
 	if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
 						  false)) {
 		trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
-		DRM_ERROR("PCH transcoder %s FIFO underrun\n",
-			  transcoder_name(pch_transcoder));
+		DRM_ERROR("PCH transcoder %c FIFO underrun\n",
+			  pipe_name(pch_transcoder));
 	}
 }
 
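Note on the underrun hunks in this group: the PCH transcoder is retyped from enum transcoder to enum pipe, which fits the hardware these paths handle (the hunk above maps PIPE_A/PIPE_B directly to SDE_TRANSA/TRANSB bits), and the log format switches from %s/transcoder_name() to %c/pipe_name(). For context, pipe_name() is roughly just the letter form of the pipe index; a toy equivalent:

/* toy_pipe_name.c - rough equivalent of the driver's pipe_name() macro. */
#include <stdio.h>

static char pipe_name(int pipe)
{
	return 'A' + pipe;            /* pipe 0 -> 'A', pipe 1 -> 'B', ... */
}

int main(void)
{
	printf("pch fifo underrun on pch transcoder %c\n", pipe_name(0));
	return 0;
}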
@@ -649,7 +649,7 @@ static void gmbus_unlock_bus(struct i2c_adapter *adapter,
 	mutex_unlock(&dev_priv->gmbus_mutex);
 }
 
-const struct i2c_lock_operations gmbus_lock_ops = {
+static const struct i2c_lock_operations gmbus_lock_ops = {
 	.lock_bus = gmbus_lock_bus,
 	.trylock_bus = gmbus_trylock_bus,
 	.unlock_bus = gmbus_unlock_bus,
@@ -2980,7 +2980,7 @@ static void proxy_unlock_bus(struct i2c_adapter *adapter,
 	sdvo->i2c->lock_ops->unlock_bus(sdvo->i2c, flags);
 }
 
-const struct i2c_lock_operations proxy_lock_ops = {
+static const struct i2c_lock_operations proxy_lock_ops = {
 	.lock_bus = proxy_lock_bus,
 	.trylock_bus = proxy_trylock_bus,
 	.unlock_bus = proxy_unlock_bus,