Merge tag 'drm-intel-next-2015-09-28-merged' of git://anongit.freedesktop.org/drm-intel into drm-next

drm-intel-next-2015-09-28:
- fastboot by default for some systems (Maarten Lankhorst)
- piles of workarounds for bxt and skl
- more fbc work from Paulo
- fix hdmi hotplug detection (Sonika)
- first few patches from Ville to parametrize register macros, prep work for
  typesafe mmio functions
- prep work for nv12 rotation (Tvrtko Ursulin)
- various other bugfixes and improvements all over

I have another backmerge here since things became messy and I didn't
realize you resolved some of them already (usually you complain when
there's a conflict ...).

For 4.4 I plan one more feature round after this and then that's it.
* tag 'drm-intel-next-2015-09-28-merged' of git://anongit.freedesktop.org/drm-intel: (80 commits)
  drm/i915: Update DRIVER_DATE to 20150928
  drm/i915: fix task reference leak in i915_debugfs.c
  drm/i915: Defer adding preallocated stolen objects to the VM list
  drm/i915: Remove extraneous request cancel.
  drm/i915: Enable querying offset of UV plane with intel_plane_obj_offset
  drm/i915: Support NV12 in rotated GGTT mapping
  drm/i915: Support appending to the rotated pages mapping
  drm/i915: Support planar formats in tile height calculations
  drm/i915/bxt: Update revision id for BXT C0
  drm/i915: Parametrize CSR_PROGRAM registers
  drm/i915: Parametrize DDI_BUF_TRANS registers
  drm/i915: Parametrize TV luma/chroma filter registers
  drm/i915: Replace raw numbers with the approproate register name in ILK turbo code
  drm/i915: Parametrize ILK turbo registers
  drm/i915: Parametrize FBC_TAG registers
  drm/i915: Parametrize GEN7_GT_SCRATCH and GEN7_LRA_LIMITS
  drm/i915: Parametrize LRC registers
  drm/i915: Don't pass sdvo_reg to intel_sdvo_select_{ddc, i2c}_bus()
  drm/i915: Ignore "digital output" and "not HDMI output" bits for eDP detection
  drm/i915: Make sure we don't detect eDP on g4x
  ...
This commit is contained in:
Dave Airlie 2015-10-16 09:59:19 +10:00
commit b312785579
41 changed files with 1203 additions and 547 deletions

View File

@ -2069,8 +2069,8 @@ static int i915_execlists(struct seq_file *m, void *data)
seq_printf(m, "%s\n", ring->name); seq_printf(m, "%s\n", ring->name);
status = I915_READ(RING_EXECLIST_STATUS(ring)); status = I915_READ(RING_EXECLIST_STATUS_LO(ring));
ctx_id = I915_READ(RING_EXECLIST_STATUS(ring) + 4); ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring));
seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n", seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n",
status, ctx_id); status, ctx_id);
@ -2085,8 +2085,8 @@ static int i915_execlists(struct seq_file *m, void *data)
read_pointer, write_pointer); read_pointer, write_pointer);
for (i = 0; i < 6; i++) { for (i = 0; i < 6; i++) {
status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i); status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i));
ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + 8*i + 4); ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i));
seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n", seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n",
i, status, ctx_id); i, status, ctx_id);
@ -2288,9 +2288,13 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
list_for_each_entry_reverse(file, &dev->filelist, lhead) { list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_file_private *file_priv = file->driver_priv;
struct task_struct *task;
seq_printf(m, "\nproc: %s\n", task = get_pid_task(file->pid, PIDTYPE_PID);
get_pid_task(file->pid, PIDTYPE_PID)->comm); if (!task)
return -ESRCH;
seq_printf(m, "\nproc: %s\n", task->comm);
put_task_struct(task);
idr_for_each(&file_priv->context_idr, per_file_ctx, idr_for_each(&file_priv->context_idr, per_file_ctx,
(void *)(unsigned long)m); (void *)(unsigned long)m);
} }

View File

@ -631,17 +631,6 @@ static void gen9_sseu_info_init(struct drm_device *dev)
u32 fuse2, s_enable, ss_disable, eu_disable; u32 fuse2, s_enable, ss_disable, eu_disable;
u8 eu_mask = 0xff; u8 eu_mask = 0xff;
/*
* BXT has a single slice. BXT also has at most 6 EU per subslice,
* and therefore only the lowest 6 bits of the 8-bit EU disable
* fields are valid.
*/
if (IS_BROXTON(dev)) {
s_max = 1;
eu_max = 6;
eu_mask = 0x3f;
}
info = (struct intel_device_info *)&dev_priv->info; info = (struct intel_device_info *)&dev_priv->info;
fuse2 = I915_READ(GEN8_FUSE2); fuse2 = I915_READ(GEN8_FUSE2);
s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
@ -1137,6 +1126,10 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->vbt.child_dev = NULL; dev_priv->vbt.child_dev = NULL;
dev_priv->vbt.child_dev_num = 0; dev_priv->vbt.child_dev_num = 0;
} }
kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
vga_switcheroo_unregister_client(dev->pdev); vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL); vga_client_register(dev->pdev, NULL, NULL, NULL);

View File

@ -1120,7 +1120,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1); s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4); s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT); s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT); s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
@ -1164,7 +1164,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
s->pm_ier = I915_READ(GEN6_PMIER); s->pm_ier = I915_READ(GEN6_PMIER);
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4); s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
/* GT SA CZ domain, 0x100000-0x138124 */ /* GT SA CZ domain, 0x100000-0x138124 */
s->tilectl = I915_READ(TILECTL); s->tilectl = I915_READ(TILECTL);
@ -1202,7 +1202,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1); I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++) for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]); I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count); I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count); I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
@ -1246,7 +1246,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_PMIER, s->pm_ier); I915_WRITE(GEN6_PMIER, s->pm_ier);
for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++) for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]); I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
/* GT SA CZ domain, 0x100000-0x138124 */ /* GT SA CZ domain, 0x100000-0x138124 */
I915_WRITE(TILECTL, s->tilectl); I915_WRITE(TILECTL, s->tilectl);

View File

@ -57,7 +57,7 @@
#define DRIVER_NAME "i915" #define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics" #define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20150911" #define DRIVER_DATE "20150928"
#undef WARN_ON #undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */ /* Many gcc seem to no see through this and fall over :( */
@ -890,7 +890,6 @@ struct intel_context {
} legacy_hw_ctx; } legacy_hw_ctx;
/* Execlists */ /* Execlists */
bool rcs_initialized;
struct { struct {
struct drm_i915_gem_object *state; struct drm_i915_gem_object *state;
struct intel_ringbuffer *ringbuf; struct intel_ringbuffer *ringbuf;
@ -949,6 +948,9 @@ struct i915_fbc {
FBC_CHIP_DEFAULT, /* disabled by default on this chip */ FBC_CHIP_DEFAULT, /* disabled by default on this chip */
FBC_ROTATION, /* rotation is not supported */ FBC_ROTATION, /* rotation is not supported */
FBC_IN_DBG_MASTER, /* kernel debugger is active */ FBC_IN_DBG_MASTER, /* kernel debugger is active */
FBC_BAD_STRIDE, /* stride is not supported */
FBC_PIXEL_RATE, /* pixel rate is too big */
FBC_PIXEL_FORMAT /* pixel format is invalid */
} no_fbc_reason; } no_fbc_reason;
bool (*fbc_enabled)(struct drm_i915_private *dev_priv); bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
@ -2015,25 +2017,26 @@ struct drm_i915_gem_object_ops {
/* /*
* Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
* considered to be the frontbuffer for the given plane interface-vise. This * considered to be the frontbuffer for the given plane interface-wise. This
* doesn't mean that the hw necessarily already scans it out, but that any * doesn't mean that the hw necessarily already scans it out, but that any
* rendering (by the cpu or gpu) will land in the frontbuffer eventually. * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
* *
* We have one bit per pipe and per scanout plane type. * We have one bit per pipe and per scanout plane type.
*/ */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER_BITS \ #define INTEL_FRONTBUFFER_BITS \
(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES) (INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \ #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \ #define INTEL_FRONTBUFFER_CURSOR(pipe) \
(1 << (1 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) (1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \ #define INTEL_FRONTBUFFER_SPRITE(pipe, plane) \
(1 << (2 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) (1 << (2 + plane + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \ #define INTEL_FRONTBUFFER_OVERLAY(pipe) \
(1 << (3 +(INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))) (1 << (2 + INTEL_MAX_SPRITE_BITS_PER_PIPE + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
struct drm_i915_gem_object { struct drm_i915_gem_object {
struct drm_gem_object base; struct drm_gem_object base;
@ -2491,6 +2494,11 @@ struct drm_i915_cmd_table {
#define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \ #define IS_SKL_ULX(dev) (INTEL_DEVID(dev) == 0x190E || \
INTEL_DEVID(dev) == 0x1915 || \ INTEL_DEVID(dev) == 0x1915 || \
INTEL_DEVID(dev) == 0x191E) INTEL_DEVID(dev) == 0x191E)
#define IS_SKL_GT3(dev) (IS_SKYLAKE(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_SKL_GT4(dev) (IS_SKYLAKE(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0030)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary) #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
#define SKL_REVID_A0 (0x0) #define SKL_REVID_A0 (0x0)
@ -2502,7 +2510,7 @@ struct drm_i915_cmd_table {
#define BXT_REVID_A0 (0x0) #define BXT_REVID_A0 (0x0)
#define BXT_REVID_B0 (0x3) #define BXT_REVID_B0 (0x3)
#define BXT_REVID_C0 (0x6) #define BXT_REVID_C0 (0x9)
/* /*
* The genX designation typically refers to the render engine, so render * The genX designation typically refers to the render engine, so render
@ -2581,7 +2589,7 @@ struct drm_i915_cmd_table {
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) #define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) #define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
#define HAS_CSR(dev) (IS_SKYLAKE(dev)) #define HAS_CSR(dev) (IS_GEN9(dev))
#define HAS_GUC_UCODE(dev) (IS_GEN9(dev)) #define HAS_GUC_UCODE(dev) (IS_GEN9(dev))
#define HAS_GUC_SCHED(dev) (IS_GEN9(dev)) #define HAS_GUC_SCHED(dev) (IS_GEN9(dev))
@ -2647,7 +2655,6 @@ struct i915_params {
int enable_cmd_parser; int enable_cmd_parser;
/* leave bools at the end to not create holes */ /* leave bools at the end to not create holes */
bool enable_hangcheck; bool enable_hangcheck;
bool fastboot;
bool prefault_disable; bool prefault_disable;
bool load_detect_test; bool load_detect_test;
bool reset; bool reset;
@ -2738,6 +2745,9 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits);
void void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask); ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void void
@ -2805,8 +2815,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size); size_t size);
struct drm_i915_gem_object *i915_gem_object_create_from_data( struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size); struct drm_device *dev, const void *data, size_t size);
void i915_init_vm(struct drm_i915_private *dev_priv,
struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj); void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma); void i915_gem_vma_destroy(struct i915_vma *vma);
@ -3173,6 +3181,10 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size, struct drm_mm_node *node, u64 size,
unsigned alignment); unsigned alignment);
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment, u64 start,
u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node); struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_device *dev); int i915_gem_init_stolen(struct drm_device *dev);

View File

@ -1713,8 +1713,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
/** /**
* i915_gem_fault - fault a page into the GTT * i915_gem_fault - fault a page into the GTT
* vma: VMA in question * @vma: VMA in question
* vmf: fault info * @vmf: fault info
* *
* The fault handler is set up by drm_gem_mmap() when a object is GTT mapped * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
* from userspace. The fault handler takes care of binding the object to * from userspace. The fault handler takes care of binding the object to
@ -4609,14 +4609,8 @@ int i915_gem_init_rings(struct drm_device *dev)
goto cleanup_vebox_ring; goto cleanup_vebox_ring;
} }
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
goto cleanup_bsd2_ring;
return 0; return 0;
cleanup_bsd2_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
cleanup_vebox_ring: cleanup_vebox_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VECS]); intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
cleanup_blt_ring: cleanup_blt_ring:
@ -4687,21 +4681,32 @@ i915_gem_init_hw(struct drm_device *dev)
} }
/* We can't enable contexts until all firmware is loaded */ /* We can't enable contexts until all firmware is loaded */
ret = intel_guc_ucode_load(dev); if (HAS_GUC_UCODE(dev)) {
if (ret) { ret = intel_guc_ucode_load(dev);
/* if (ret) {
* If we got an error and GuC submission is enabled, map /*
* the error to -EIO so the GPU will be declared wedged. * If we got an error and GuC submission is enabled, map
* OTOH, if we didn't intend to use the GuC anyway, just * the error to -EIO so the GPU will be declared wedged.
* discard the error and carry on. * OTOH, if we didn't intend to use the GuC anyway, just
*/ * discard the error and carry on.
DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret, */
i915.enable_guc_submission ? "" : " (ignored)"); DRM_ERROR("Failed to initialize GuC, error %d%s\n", ret,
ret = i915.enable_guc_submission ? -EIO : 0; i915.enable_guc_submission ? "" :
if (ret) " (ignored)");
goto out; ret = i915.enable_guc_submission ? -EIO : 0;
if (ret)
goto out;
}
} }
/*
* Increment the next seqno by 0x100 so we have a visible break
* on re-initialisation
*/
ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
if (ret)
goto out;
/* Now it is safe to go back round and do everything else: */ /* Now it is safe to go back round and do everything else: */
for_each_ring(ring, dev_priv, i) { for_each_ring(ring, dev_priv, i) {
struct drm_i915_gem_request *req; struct drm_i915_gem_request *req;
@ -4839,18 +4844,6 @@ init_ring_lists(struct intel_engine_cs *ring)
INIT_LIST_HEAD(&ring->request_list); INIT_LIST_HEAD(&ring->request_list);
} }
void i915_init_vm(struct drm_i915_private *dev_priv,
struct i915_address_space *vm)
{
if (!i915_is_ggtt(vm))
drm_mm_init(&vm->mm, vm->start, vm->total);
vm->dev = dev_priv->dev;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
INIT_LIST_HEAD(&vm->global_link);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
void void
i915_gem_load(struct drm_device *dev) i915_gem_load(struct drm_device *dev)
{ {
@ -4874,8 +4867,6 @@ i915_gem_load(struct drm_device *dev)
NULL); NULL);
INIT_LIST_HEAD(&dev_priv->vm_list); INIT_LIST_HEAD(&dev_priv->vm_list);
i915_init_vm(dev_priv, &dev_priv->gtt.base);
INIT_LIST_HEAD(&dev_priv->context_list); INIT_LIST_HEAD(&dev_priv->context_list);
INIT_LIST_HEAD(&dev_priv->mm.unbound_list); INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.bound_list);
@ -4903,6 +4894,14 @@ i915_gem_load(struct drm_device *dev)
dev_priv->num_fence_regs = dev_priv->num_fence_regs =
I915_READ(vgtif_reg(avail_rs.fence_num)); I915_READ(vgtif_reg(avail_rs.fence_num));
/*
* Set initial sequence number for requests.
* Using this number allows the wraparound to happen early,
* catching any obvious problems.
*/
dev_priv->next_seqno = ((u32)~0 - 0x1100);
dev_priv->last_seqno = ((u32)~0 - 0x1101);
/* Initialize fence registers to zero */ /* Initialize fence registers to zero */
INIT_LIST_HEAD(&dev_priv->mm.fence_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list);
i915_gem_restore_fences(dev); i915_gem_restore_fences(dev);
@ -4972,9 +4971,9 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
/** /**
* i915_gem_track_fb - update frontbuffer tracking * i915_gem_track_fb - update frontbuffer tracking
* old: current GEM buffer for the frontbuffer slots * @old: current GEM buffer for the frontbuffer slots
* new: new GEM buffer for the frontbuffer slots * @new: new GEM buffer for the frontbuffer slots
* frontbuffer_bits: bitmask of frontbuffer slots * @frontbuffer_bits: bitmask of frontbuffer slots
* *
* This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
* from @old and setting them in @new. Both @old and @new can be NULL. * from @old and setting them in @new. Both @old and @new can be NULL.

View File

@ -1009,7 +1009,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
} }
if (i915.enable_execlists && !ctx->engine[ring->id].state) { if (i915.enable_execlists && !ctx->engine[ring->id].state) {
int ret = intel_lr_context_deferred_create(ctx, ring); int ret = intel_lr_context_deferred_alloc(ctx, ring);
if (ret) { if (ret) {
DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret); DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
return ERR_PTR(ret); return ERR_PTR(ret);

View File

@ -2121,6 +2121,16 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
return gen8_ppgtt_init(ppgtt); return gen8_ppgtt_init(ppgtt);
} }
static void i915_address_space_init(struct i915_address_space *vm,
struct drm_i915_private *dev_priv)
{
drm_mm_init(&vm->mm, vm->start, vm->total);
vm->dev = dev_priv->dev;
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
list_add_tail(&vm->global_link, &dev_priv->vm_list);
}
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
@ -2129,9 +2139,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
ret = __hw_ppgtt_init(dev, ppgtt); ret = __hw_ppgtt_init(dev, ppgtt);
if (ret == 0) { if (ret == 0) {
kref_init(&ppgtt->ref); kref_init(&ppgtt->ref);
drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, i915_address_space_init(&ppgtt->base, dev_priv);
ppgtt->base.total);
i915_init_vm(dev_priv, &ppgtt->base);
} }
return ret; return ret;
@ -2525,7 +2533,6 @@ static int ggtt_bind_vma(struct i915_vma *vma,
* the bound flag ourselves. * the bound flag ourselves.
*/ */
vma->bound |= GLOBAL_BIND; vma->bound |= GLOBAL_BIND;
} }
if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) { if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
@ -2618,11 +2625,13 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end); BUG_ON(mappable_end > end);
/* Subtract the guard page ... */ ggtt_vm->start = start;
drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
dev_priv->gtt.base.start = start; /* Subtract the guard page before address space initialization to
dev_priv->gtt.base.total = end - start; * shrink the range used by drm_mm */
ggtt_vm->total = end - start - PAGE_SIZE;
i915_address_space_init(ggtt_vm, dev_priv);
ggtt_vm->total += PAGE_SIZE;
if (intel_vgpu_active(dev)) { if (intel_vgpu_active(dev)) {
ret = intel_vgt_balloon(dev); ret = intel_vgt_balloon(dev);
@ -2631,7 +2640,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
} }
if (!HAS_LLC(dev)) if (!HAS_LLC(dev))
dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust; ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */ /* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
@ -2647,6 +2656,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
return ret; return ret;
} }
vma->bound |= GLOBAL_BIND; vma->bound |= GLOBAL_BIND;
list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
} }
/* Clear any non-preallocated blocks */ /* Clear any non-preallocated blocks */
@ -3234,15 +3244,18 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
} }
static void static struct scatterlist *
rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height, rotate_pages(dma_addr_t *in, unsigned int offset,
struct sg_table *st) unsigned int width, unsigned int height,
struct sg_table *st, struct scatterlist *sg)
{ {
unsigned int column, row; unsigned int column, row;
unsigned int src_idx; unsigned int src_idx;
struct scatterlist *sg = st->sgl;
st->nents = 0; if (!sg) {
st->nents = 0;
sg = st->sgl;
}
for (column = 0; column < width; column++) { for (column = 0; column < width; column++) {
src_idx = width * (height - 1) + column; src_idx = width * (height - 1) + column;
@ -3253,12 +3266,14 @@ rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height,
* The only thing we need are DMA addresses. * The only thing we need are DMA addresses.
*/ */
sg_set_page(sg, NULL, PAGE_SIZE, 0); sg_set_page(sg, NULL, PAGE_SIZE, 0);
sg_dma_address(sg) = in[src_idx]; sg_dma_address(sg) = in[offset + src_idx];
sg_dma_len(sg) = PAGE_SIZE; sg_dma_len(sg) = PAGE_SIZE;
sg = sg_next(sg); sg = sg_next(sg);
src_idx -= width; src_idx -= width;
} }
} }
return sg;
} }
static struct sg_table * static struct sg_table *
@ -3267,10 +3282,13 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
{ {
struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
unsigned int size_pages = rot_info->size >> PAGE_SHIFT; unsigned int size_pages = rot_info->size >> PAGE_SHIFT;
unsigned int size_pages_uv;
struct sg_page_iter sg_iter; struct sg_page_iter sg_iter;
unsigned long i; unsigned long i;
dma_addr_t *page_addr_list; dma_addr_t *page_addr_list;
struct sg_table *st; struct sg_table *st;
unsigned int uv_start_page;
struct scatterlist *sg;
int ret = -ENOMEM; int ret = -ENOMEM;
/* Allocate a temporary list of source pages for random access. */ /* Allocate a temporary list of source pages for random access. */
@ -3279,12 +3297,18 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
if (!page_addr_list) if (!page_addr_list)
return ERR_PTR(ret); return ERR_PTR(ret);
/* Account for UV plane with NV12. */
if (rot_info->pixel_format == DRM_FORMAT_NV12)
size_pages_uv = rot_info->size_uv >> PAGE_SHIFT;
else
size_pages_uv = 0;
/* Allocate target SG list. */ /* Allocate target SG list. */
st = kmalloc(sizeof(*st), GFP_KERNEL); st = kmalloc(sizeof(*st), GFP_KERNEL);
if (!st) if (!st)
goto err_st_alloc; goto err_st_alloc;
ret = sg_alloc_table(st, size_pages, GFP_KERNEL); ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
if (ret) if (ret)
goto err_sg_alloc; goto err_sg_alloc;
@ -3296,15 +3320,32 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
} }
/* Rotate the pages. */ /* Rotate the pages. */
rotate_pages(page_addr_list, sg = rotate_pages(page_addr_list, 0,
rot_info->width_pages, rot_info->height_pages, rot_info->width_pages, rot_info->height_pages,
st); st, NULL);
/* Append the UV plane if NV12. */
if (rot_info->pixel_format == DRM_FORMAT_NV12) {
uv_start_page = size_pages;
/* Check for tile-row un-alignment. */
if (offset_in_page(rot_info->uv_offset))
uv_start_page--;
rot_info->uv_start_page = uv_start_page;
rotate_pages(page_addr_list, uv_start_page,
rot_info->width_pages_uv,
rot_info->height_pages_uv,
st, sg);
}
DRM_DEBUG_KMS( DRM_DEBUG_KMS(
"Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages).\n", "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n",
obj->base.size, rot_info->pitch, rot_info->height, obj->base.size, rot_info->pitch, rot_info->height,
rot_info->pixel_format, rot_info->width_pages, rot_info->pixel_format, rot_info->width_pages,
rot_info->height_pages, size_pages); rot_info->height_pages, size_pages + size_pages_uv,
size_pages);
drm_free_large(page_addr_list); drm_free_large(page_addr_list);
@ -3316,10 +3357,11 @@ intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
drm_free_large(page_addr_list); drm_free_large(page_addr_list);
DRM_DEBUG_KMS( DRM_DEBUG_KMS(
"Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages)\n", "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n",
obj->base.size, ret, rot_info->pitch, rot_info->height, obj->base.size, ret, rot_info->pitch, rot_info->height,
rot_info->pixel_format, rot_info->width_pages, rot_info->pixel_format, rot_info->width_pages,
rot_info->height_pages, size_pages); rot_info->height_pages, size_pages + size_pages_uv,
size_pages);
return ERR_PTR(ret); return ERR_PTR(ret);
} }

View File

@ -138,10 +138,14 @@ enum i915_ggtt_view_type {
struct intel_rotation_info { struct intel_rotation_info {
unsigned int height; unsigned int height;
unsigned int pitch; unsigned int pitch;
unsigned int uv_offset;
uint32_t pixel_format; uint32_t pixel_format;
uint64_t fb_modifier; uint64_t fb_modifier;
unsigned int width_pages, height_pages; unsigned int width_pages, height_pages;
uint64_t size; uint64_t size;
unsigned int width_pages_uv, height_pages_uv;
uint64_t size_uv;
unsigned int uv_start_page;
}; };
struct i915_ggtt_view { struct i915_ggtt_view {
@ -341,6 +345,7 @@ struct i915_gtt {
struct i915_address_space base; struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */ size_t stolen_size; /* Total size of stolen memory */
size_t stolen_usable_size; /* Total size minus BIOS reserved */
u64 mappable_end; /* End offset that we can CPU map */ u64 mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */ struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */ phys_addr_t mappable_base; /* PA of our GMADR */

View File

@ -42,9 +42,9 @@
* for is a boon. * for is a boon.
*/ */
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size, struct drm_mm_node *node, u64 size,
unsigned alignment) unsigned alignment, u64 start, u64 end)
{ {
int ret; int ret;
@ -52,13 +52,23 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
return -ENODEV; return -ENODEV;
mutex_lock(&dev_priv->mm.stolen_lock); mutex_lock(&dev_priv->mm.stolen_lock);
ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment, ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
DRM_MM_SEARCH_DEFAULT); alignment, start, end,
DRM_MM_SEARCH_DEFAULT);
mutex_unlock(&dev_priv->mm.stolen_lock); mutex_unlock(&dev_priv->mm.stolen_lock);
return ret; return ret;
} }
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node, u64 size,
unsigned alignment)
{
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
alignment, 0,
dev_priv->gtt.stolen_usable_size);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
struct drm_mm_node *node) struct drm_mm_node *node)
{ {
@ -186,6 +196,29 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
drm_mm_takedown(&dev_priv->mm.stolen); drm_mm_takedown(&dev_priv->mm.stolen);
} }
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size)
{
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
unsigned long stolen_top = dev_priv->mm.stolen_base +
dev_priv->gtt.stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
/* On these platforms, the register doesn't have a size field, so the
* size is the distance between the base and the top of the stolen
* memory. We also have the genuine case where base is zero and there's
* nothing reserved. */
if (*base == 0)
*size = 0;
else
*size = stolen_top - *base;
}
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv, static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size) unsigned long *base, unsigned long *size)
{ {
@ -281,7 +314,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_device *dev) int i915_gem_init_stolen(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reserved_total, reserved_base, reserved_size; unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top; unsigned long stolen_top;
mutex_init(&dev_priv->mm.stolen_lock); mutex_init(&dev_priv->mm.stolen_lock);
@ -305,7 +338,12 @@ int i915_gem_init_stolen(struct drm_device *dev)
switch (INTEL_INFO(dev_priv)->gen) { switch (INTEL_INFO(dev_priv)->gen) {
case 2: case 2:
case 3: case 3:
break;
case 4: case 4:
if (IS_G4X(dev))
g4x_get_stolen_reserved(dev_priv, &reserved_base,
&reserved_size);
break;
case 5: case 5:
/* Assume the gen6 maximum for the older platforms. */ /* Assume the gen6 maximum for the older platforms. */
reserved_size = 1024 * 1024; reserved_size = 1024 * 1024;
@ -352,9 +390,11 @@ int i915_gem_init_stolen(struct drm_device *dev)
dev_priv->gtt.stolen_size >> 10, dev_priv->gtt.stolen_size >> 10,
(dev_priv->gtt.stolen_size - reserved_total) >> 10); (dev_priv->gtt.stolen_size - reserved_total) >> 10);
dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
reserved_total;
/* Basic memrange allocator for stolen space */ /* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size - drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
reserved_total);
return 0; return 0;
} }
@ -544,7 +584,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt); vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto err_out; goto err;
} }
/* To simplify the initialisation sequence between KMS and GTT, /* To simplify the initialisation sequence between KMS and GTT,
@ -558,23 +598,19 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
ret = drm_mm_reserve_node(&ggtt->mm, &vma->node); ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
if (ret) { if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
goto err_vma; goto err;
} }
vma->bound |= GLOBAL_BIND;
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
} }
vma->bound |= GLOBAL_BIND;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
i915_gem_object_pin_pages(obj); i915_gem_object_pin_pages(obj);
return obj; return obj;
err_vma: err:
i915_gem_vma_destroy(vma);
err_out:
i915_gem_stolen_remove_node(dev_priv, stolen);
kfree(stolen);
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
return NULL; return NULL;
} }

View File

@ -457,17 +457,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
} }
if ((obj = error->ring[i].hws_page)) { if ((obj = error->ring[i].hws_page)) {
u64 hws_offset = obj->gtt_offset;
u32 *hws_page = &obj->pages[0][0];
if (i915.enable_execlists) {
hws_offset += LRC_PPHWSP_PN * PAGE_SIZE;
hws_page = &obj->pages[LRC_PPHWSP_PN][0];
}
err_printf(m, "%s --- HW Status = 0x%08llx\n", err_printf(m, "%s --- HW Status = 0x%08llx\n",
dev_priv->ring[i].name, dev_priv->ring[i].name, hws_offset);
obj->gtt_offset + LRC_PPHWSP_PN * PAGE_SIZE);
offset = 0; offset = 0;
for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
err_printf(m, "[%04x] %08x %08x %08x %08x\n", err_printf(m, "[%04x] %08x %08x %08x %08x\n",
offset, offset,
obj->pages[LRC_PPHWSP_PN][elt], hws_page[elt],
obj->pages[LRC_PPHWSP_PN][elt+1], hws_page[elt+1],
obj->pages[LRC_PPHWSP_PN][elt+2], hws_page[elt+2],
obj->pages[LRC_PPHWSP_PN][elt+3]); hws_page[elt+3]);
offset += 16; offset += 16;
} }
} }

View File

@ -53,6 +53,7 @@
#define START_DMA (1<<0) #define START_DMA (1<<0)
#define DMA_GUC_WOPCM_OFFSET 0xc340 #define DMA_GUC_WOPCM_OFFSET 0xc340
#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ #define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
#define GUC_MAX_IDLE_COUNT 0xC3E4
#define GUC_WOPCM_SIZE 0xc050 #define GUC_WOPCM_SIZE 0xc050
#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */ #define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */

View File

@ -167,6 +167,44 @@ static const u32 hpd_bxt[HPD_NUM_PINS] = {
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits)
{
uint32_t val;
assert_spin_locked(&dev_priv->irq_lock);
WARN_ON(bits & ~mask);
val = I915_READ(PORT_HOTPLUG_EN);
val &= ~mask;
val |= bits;
I915_WRITE(PORT_HOTPLUG_EN, val);
}
/**
* i915_hotplug_interrupt_update - update hotplug interrupt enable
* @dev_priv: driver private
* @mask: bits to update
* @bits: bits to enable
* NOTE: the HPD enable bits are modified both inside and outside
* of an interrupt context. To avoid that read-modify-write cycles
* interfer, these bits are protected by a spinlock. Since this
* function is usually not called from a context where the lock is
* held already, this function acquires the lock itself. A non-locking
* version is also available.
*/
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
uint32_t mask,
uint32_t bits)
{
spin_lock_irq(&dev_priv->irq_lock);
i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
spin_unlock_irq(&dev_priv->irq_lock);
}
/** /**
* ilk_update_display_irq - update DEIMR * ilk_update_display_irq - update DEIMR
* @dev_priv: driver private * @dev_priv: driver private
@ -1681,7 +1719,6 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev)
intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
hotplug_trigger, hpd_status_i915, hotplug_trigger, hpd_status_i915,
i9xx_port_hotplug_long_detect); i9xx_port_hotplug_long_detect);
intel_hpd_irq_handler(dev, pin_mask, long_mask); intel_hpd_irq_handler(dev, pin_mask, long_mask);
} }
} }
@ -3075,7 +3112,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{ {
enum pipe pipe; enum pipe pipe;
I915_WRITE(PORT_HOTPLUG_EN, 0); i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
for_each_pipe(dev_priv, pipe) for_each_pipe(dev_priv, pipe)
@ -3491,7 +3528,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{ {
dev_priv->irq_mask = ~0; dev_priv->irq_mask = ~0;
I915_WRITE(PORT_HOTPLUG_EN, 0); i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN); POSTING_READ(PORT_HOTPLUG_EN);
I915_WRITE(VLV_IIR, 0xffffffff); I915_WRITE(VLV_IIR, 0xffffffff);
@ -3865,7 +3902,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
int pipe; int pipe;
if (I915_HAS_HOTPLUG(dev)) { if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0); i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
} }
@ -3899,7 +3936,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_USER_INTERRUPT; I915_USER_INTERRUPT;
if (I915_HAS_HOTPLUG(dev)) { if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0); i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN); POSTING_READ(PORT_HOTPLUG_EN);
/* Enable in IER... */ /* Enable in IER... */
@ -4061,7 +4098,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
int pipe; int pipe;
if (I915_HAS_HOTPLUG(dev)) { if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0); i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
} }
@ -4082,7 +4119,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
int pipe; int pipe;
I915_WRITE(PORT_HOTPLUG_EN, 0); i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xeffe); I915_WRITE(HWSTAM, 0xeffe);
@ -4143,7 +4180,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
I915_WRITE(IER, enable_mask); I915_WRITE(IER, enable_mask);
POSTING_READ(IER); POSTING_READ(IER);
I915_WRITE(PORT_HOTPLUG_EN, 0); i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
POSTING_READ(PORT_HOTPLUG_EN); POSTING_READ(PORT_HOTPLUG_EN);
i915_enable_asle_pipestat(dev); i915_enable_asle_pipestat(dev);
@ -4158,22 +4195,22 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
assert_spin_locked(&dev_priv->irq_lock); assert_spin_locked(&dev_priv->irq_lock);
hotplug_en = I915_READ(PORT_HOTPLUG_EN);
hotplug_en &= ~HOTPLUG_INT_EN_MASK;
/* Note HDMI and DP share hotplug bits */ /* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */ /* enable bits are the same for all generations */
hotplug_en |= intel_hpd_enabled_irqs(dev, hpd_mask_i915); hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
/* Programming the CRT detection parameters tends /* Programming the CRT detection parameters tends
to generate a spurious hotplug event about three to generate a spurious hotplug event about three
seconds later. So just do it once. seconds later. So just do it once.
*/ */
if (IS_G4X(dev)) if (IS_G4X(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
/* Ignore TV since it's buggy */ /* Ignore TV since it's buggy */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); i915_hotplug_interrupt_update_locked(dev_priv,
(HOTPLUG_INT_EN_MASK
| CRT_HOTPLUG_VOLTAGE_COMPARE_MASK),
hotplug_en);
} }
static irqreturn_t i965_irq_handler(int irq, void *arg) static irqreturn_t i965_irq_handler(int irq, void *arg)
@ -4286,7 +4323,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
if (!dev_priv) if (!dev_priv)
return; return;
I915_WRITE(PORT_HOTPLUG_EN, 0); i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
I915_WRITE(HWSTAM, 0xffffffff); I915_WRITE(HWSTAM, 0xffffffff);

View File

@ -40,7 +40,6 @@ struct i915_params i915 __read_mostly = {
.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT), .preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
.disable_power_well = 1, .disable_power_well = 1,
.enable_ips = 1, .enable_ips = 1,
.fastboot = 0,
.prefault_disable = 0, .prefault_disable = 0,
.load_detect_test = 0, .load_detect_test = 0,
.reset = true, .reset = true,
@ -62,7 +61,7 @@ MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=disable, " "Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])"); "1=on, -1=force vga console preference [default])");
module_param_named(panel_ignore_lid, i915.panel_ignore_lid, int, 0600); module_param_named_unsafe(panel_ignore_lid, i915.panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid, MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect, 1=autodetect disabled [default], " "Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)"); "-1=force lid closed, -2=force lid open)");
@ -85,17 +84,17 @@ MODULE_PARM_DESC(enable_fbc,
"Enable frame buffer compression for power savings " "Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))"); "(default: -1 (use per-chip default))");
module_param_named(lvds_channel_mode, i915.lvds_channel_mode, int, 0600); module_param_named_unsafe(lvds_channel_mode, i915.lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode, MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode " "Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
module_param_named(lvds_use_ssc, i915.panel_use_ssc, int, 0600); module_param_named_unsafe(lvds_use_ssc, i915.panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc, MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] " "Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)"); "(default: auto from VBT)");
module_param_named(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600); module_param_named_unsafe(vbt_sdvo_panel_type, i915.vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type, MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT " "Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)"); "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
@ -103,7 +102,7 @@ MODULE_PARM_DESC(vbt_sdvo_panel_type,
module_param_named_unsafe(reset, i915.reset, bool, 0600); module_param_named_unsafe(reset, i915.reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
module_param_named(enable_hangcheck, i915.enable_hangcheck, bool, 0644); module_param_named_unsafe(enable_hangcheck, i915.enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck, MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. " "Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. " "WARNING: Disabling this can cause system wide hangs. "
@ -114,29 +113,25 @@ MODULE_PARM_DESC(enable_ppgtt,
"Override PPGTT usage. " "Override PPGTT usage. "
"(-1=auto [default], 0=disabled, 1=aliasing, 2=full)"); "(-1=auto [default], 0=disabled, 1=aliasing, 2=full)");
module_param_named(enable_execlists, i915.enable_execlists, int, 0400); module_param_named_unsafe(enable_execlists, i915.enable_execlists, int, 0400);
MODULE_PARM_DESC(enable_execlists, MODULE_PARM_DESC(enable_execlists,
"Override execlists usage. " "Override execlists usage. "
"(-1=auto [default], 0=disabled, 1=enabled)"); "(-1=auto [default], 0=disabled, 1=enabled)");
module_param_named(enable_psr, i915.enable_psr, int, 0600); module_param_named_unsafe(enable_psr, i915.enable_psr, int, 0600);
MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)"); MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
module_param_named(preliminary_hw_support, i915.preliminary_hw_support, int, 0600); module_param_named_unsafe(preliminary_hw_support, i915.preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support, MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support."); "Enable preliminary hardware support.");
module_param_named(disable_power_well, i915.disable_power_well, int, 0600); module_param_named_unsafe(disable_power_well, i915.disable_power_well, int, 0600);
MODULE_PARM_DESC(disable_power_well, MODULE_PARM_DESC(disable_power_well,
"Disable the power well when possible (default: true)"); "Disable the power well when possible (default: true)");
module_param_named(enable_ips, i915.enable_ips, int, 0600); module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)"); MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
module_param_named(fastboot, i915.fastboot, bool, 0600);
MODULE_PARM_DESC(fastboot,
"Try to skip unnecessary mode sets at boot time (default: false)");
module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
MODULE_PARM_DESC(prefault_disable, MODULE_PARM_DESC(prefault_disable,
"Disable page prefaulting for pread/pwrite/reloc (default:false). " "Disable page prefaulting for pread/pwrite/reloc (default:false). "
@ -147,7 +142,7 @@ MODULE_PARM_DESC(load_detect_test,
"Force-enable the VGA load detect code for testing (default:false). " "Force-enable the VGA load detect code for testing (default:false). "
"For developers only."); "For developers only.");
module_param_named(invert_brightness, i915.invert_brightness, int, 0600); module_param_named_unsafe(invert_brightness, i915.invert_brightness, int, 0600);
MODULE_PARM_DESC(invert_brightness, MODULE_PARM_DESC(invert_brightness,
"Invert backlight brightness " "Invert backlight brightness "
"(-1 force normal, 0 machine defaults, 1 force inversion), please " "(-1 force normal, 0 machine defaults, 1 force inversion), please "
@ -158,14 +153,14 @@ MODULE_PARM_DESC(invert_brightness,
module_param_named(disable_display, i915.disable_display, bool, 0600); module_param_named(disable_display, i915.disable_display, bool, 0600);
MODULE_PARM_DESC(disable_display, "Disable display (default: false)"); MODULE_PARM_DESC(disable_display, "Disable display (default: false)");
module_param_named(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600); module_param_named_unsafe(disable_vtd_wa, i915.disable_vtd_wa, bool, 0600);
MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)"); MODULE_PARM_DESC(disable_vtd_wa, "Disable all VT-d workarounds (default: false)");
module_param_named(enable_cmd_parser, i915.enable_cmd_parser, int, 0600); module_param_named_unsafe(enable_cmd_parser, i915.enable_cmd_parser, int, 0600);
MODULE_PARM_DESC(enable_cmd_parser, MODULE_PARM_DESC(enable_cmd_parser,
"Enable command parsing (1=enabled [default], 0=disabled)"); "Enable command parsing (1=enabled [default], 0=disabled)");
module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600); module_param_named_unsafe(use_mmio_flip, i915.use_mmio_flip, int, 0600);
MODULE_PARM_DESC(use_mmio_flip, MODULE_PARM_DESC(use_mmio_flip,
"use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)");

View File

@ -1527,7 +1527,7 @@ enum skl_disp_power_wells {
#define GEN7_GFX_PEND_TLB0 0x4034 #define GEN7_GFX_PEND_TLB0 0x4034
#define GEN7_GFX_PEND_TLB1 0x4038 #define GEN7_GFX_PEND_TLB1 0x4038
/* L3, CVS, ZTLB, RCC, CASC LRA min, max values */ /* L3, CVS, ZTLB, RCC, CASC LRA min, max values */
#define GEN7_LRA_LIMITS_BASE 0x403C #define GEN7_LRA_LIMITS(i) (0x403C + (i) * 4)
#define GEN7_LRA_LIMITS_REG_NUM 13 #define GEN7_LRA_LIMITS_REG_NUM 13
#define GEN7_MEDIA_MAX_REQ_COUNT 0x4070 #define GEN7_MEDIA_MAX_REQ_COUNT 0x4070
#define GEN7_GFX_MAX_REQ_COUNT 0x4074 #define GEN7_GFX_MAX_REQ_COUNT 0x4074
@ -2011,7 +2011,7 @@ enum skl_disp_power_wells {
#define FBC_CTL_CPU_FENCE (1<<1) #define FBC_CTL_CPU_FENCE (1<<1)
#define FBC_CTL_PLANE(plane) ((plane)<<0) #define FBC_CTL_PLANE(plane) ((plane)<<0)
#define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */ #define FBC_FENCE_OFF 0x03218 /* BSpec typo has 321Bh */
#define FBC_TAG 0x03300 #define FBC_TAG(i) (0x03300 + (i) * 4)
#define FBC_STATUS2 0x43214 #define FBC_STATUS2 0x43214
#define FBC_COMPRESSION_MASK 0x7ff #define FBC_COMPRESSION_MASK 0x7ff
@ -2494,6 +2494,11 @@ enum skl_disp_power_wells {
#define MCHBAR_MIRROR_BASE_SNB 0x140000 #define MCHBAR_MIRROR_BASE_SNB 0x140000
#define CTG_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x34)
#define ELK_STOLEN_RESERVED (MCHBAR_MIRROR_BASE + 0x48)
#define G4X_STOLEN_RESERVED_ADDR1_MASK (0xFFFF << 16)
#define G4X_STOLEN_RESERVED_ADDR2_MASK (0xFFF << 4)
/* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */ /* Memory controller frequency in MCHBAR for Haswell (possible SNB+) */
#define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04) #define DCLK (MCHBAR_MIRROR_BASE_SNB + 0x5e04)
@ -2574,7 +2579,7 @@ enum skl_disp_power_wells {
#define TSFS_INTR_MASK 0x000000ff #define TSFS_INTR_MASK 0x000000ff
#define CRSTANDVID 0x11100 #define CRSTANDVID 0x11100
#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */ #define PXVFREQ(i) (0x11110 + (i) * 4) /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
#define PXVFREQ_PX_MASK 0x7f000000 #define PXVFREQ_PX_MASK 0x7f000000
#define PXVFREQ_PX_SHIFT 24 #define PXVFREQ_PX_SHIFT 24
#define VIDFREQ_BASE 0x11110 #define VIDFREQ_BASE 0x11110
@ -2758,8 +2763,8 @@ enum skl_disp_power_wells {
#define CSIEW0 0x11250 #define CSIEW0 0x11250
#define CSIEW1 0x11254 #define CSIEW1 0x11254
#define CSIEW2 0x11258 #define CSIEW2 0x11258
#define PEW 0x1125c #define PEW(i) (0x1125c + (i) * 4) /* 5 registers */
#define DEW 0x11270 #define DEW(i) (0x11270 + (i) * 4) /* 3 registers */
#define MCHAFE 0x112c0 #define MCHAFE 0x112c0
#define CSIEC 0x112e0 #define CSIEC 0x112e0
#define DMIEC 0x112e4 #define DMIEC 0x112e4
@ -2783,8 +2788,8 @@ enum skl_disp_power_wells {
#define EG5 0x11624 #define EG5 0x11624
#define EG6 0x11628 #define EG6 0x11628
#define EG7 0x1162c #define EG7 0x1162c
#define PXW 0x11664 #define PXW(i) (0x11664 + (i) * 4) /* 4 registers */
#define PXWL 0x11680 #define PXWL(i) (0x11680 + (i) * 4) /* 8 registers */
#define LCFUSE02 0x116c0 #define LCFUSE02 0x116c0
#define LCFUSE_HIV_MASK 0x000000ff #define LCFUSE_HIV_MASK 0x000000ff
#define CSIPLL0 0x12c10 #define CSIPLL0 0x12c10
@ -4077,14 +4082,10 @@ enum skl_disp_power_wells {
# define TV_CC_DATA_1_MASK 0x0000007f # define TV_CC_DATA_1_MASK 0x0000007f
# define TV_CC_DATA_1_SHIFT 0 # define TV_CC_DATA_1_SHIFT 0
#define TV_H_LUMA_0 0x68100 #define TV_H_LUMA(i) (0x68100 + (i) * 4) /* 60 registers */
#define TV_H_LUMA_59 0x681ec #define TV_H_CHROMA(i) (0x68200 + (i) * 4) /* 60 registers */
#define TV_H_CHROMA_0 0x68200 #define TV_V_LUMA(i) (0x68300 + (i) * 4) /* 43 registers */
#define TV_H_CHROMA_59 0x682ec #define TV_V_CHROMA(i) (0x68400 + (i) * 4) /* 43 registers */
#define TV_V_LUMA_0 0x68300
#define TV_V_LUMA_42 0x683a8
#define TV_V_CHROMA_0 0x68400
#define TV_V_CHROMA_42 0x684a8
/* Display Port */ /* Display Port */
#define DP_A 0x64000 /* eDP */ #define DP_A 0x64000 /* eDP */
@ -6808,7 +6809,7 @@ enum skl_disp_power_wells {
GEN6_PM_RP_DOWN_THRESHOLD | \ GEN6_PM_RP_DOWN_THRESHOLD | \
GEN6_PM_RP_DOWN_TIMEOUT) GEN6_PM_RP_DOWN_TIMEOUT)
#define GEN7_GT_SCRATCH_BASE 0x4F100 #define GEN7_GT_SCRATCH(i) (0x4F100 + (i) * 4)
#define GEN7_GT_SCRATCH_REG_NUM 8 #define GEN7_GT_SCRATCH_REG_NUM 8
#define VLV_GTLC_SURVIVABILITY_REG 0x130098 #define VLV_GTLC_SURVIVABILITY_REG 0x130098
@ -6897,6 +6898,7 @@ enum skl_disp_power_wells {
#define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0) #define GEN7_DOP_CLOCK_GATE_ENABLE (1<<0)
#define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2) #define GEN8_DOP_CLOCK_GATE_CFCLK_ENABLE (1<<2)
#define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4) #define GEN8_DOP_CLOCK_GATE_GUC_ENABLE (1<<4)
#define GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE (1<<6)
#define GEN8_GARBCNTL 0xB004 #define GEN8_GARBCNTL 0xB004
#define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7) #define GEN9_GAPS_TSV_CREDIT_DISABLE (1<<7)
@ -6942,6 +6944,9 @@ enum skl_disp_power_wells {
#define HSW_ROW_CHICKEN3 0xe49c #define HSW_ROW_CHICKEN3 0xe49c
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6) #define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
#define HALF_SLICE_CHICKEN2 0xe180
#define GEN8_ST_PO_DISABLE (1<<13)
#define HALF_SLICE_CHICKEN3 0xe184 #define HALF_SLICE_CHICKEN3 0xe184
#define HSW_SAMPLE_C_PERFORMANCE (1<<9) #define HSW_SAMPLE_C_PERFORMANCE (1<<9)
#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) #define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
@ -7192,7 +7197,8 @@ enum skl_disp_power_wells {
/* DDI Buffer Translations */ /* DDI Buffer Translations */
#define DDI_BUF_TRANS_A 0x64E00 #define DDI_BUF_TRANS_A 0x64E00
#define DDI_BUF_TRANS_B 0x64E60 #define DDI_BUF_TRANS_B 0x64E60
#define DDI_BUF_TRANS(port) _PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) #define DDI_BUF_TRANS_LO(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8)
#define DDI_BUF_TRANS_HI(port, i) (_PORT(port, DDI_BUF_TRANS_A, DDI_BUF_TRANS_B) + (i) * 8 + 4)
/* Sideband Interface (SBI) is programmed indirectly, via /* Sideband Interface (SBI) is programmed indirectly, via
* SBI_ADDR, which contains the register offset; and SBI_DATA, * SBI_ADDR, which contains the register offset; and SBI_DATA,
@ -7503,6 +7509,44 @@ enum skl_disp_power_wells {
#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */ #define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
/* BXT MIPI mode configure */
#define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8
#define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8
#define BXT_MIPI_TRANS_HACTIVE(tc) _MIPI_PORT(tc, \
_BXT_MIPIA_TRANS_HACTIVE, _BXT_MIPIC_TRANS_HACTIVE)
#define _BXT_MIPIA_TRANS_VACTIVE 0x6B0FC
#define _BXT_MIPIC_TRANS_VACTIVE 0x6B8FC
#define BXT_MIPI_TRANS_VACTIVE(tc) _MIPI_PORT(tc, \
_BXT_MIPIA_TRANS_VACTIVE, _BXT_MIPIC_TRANS_VACTIVE)
#define _BXT_MIPIA_TRANS_VTOTAL 0x6B100
#define _BXT_MIPIC_TRANS_VTOTAL 0x6B900
#define BXT_MIPI_TRANS_VTOTAL(tc) _MIPI_PORT(tc, \
_BXT_MIPIA_TRANS_VTOTAL, _BXT_MIPIC_TRANS_VTOTAL)
#define BXT_DSI_PLL_CTL 0x161000
#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
#define BXT_DSIC_16X_BY2 (1 << 10)
#define BXT_DSIC_16X_BY3 (2 << 10)
#define BXT_DSIC_16X_BY4 (3 << 10)
#define BXT_DSIA_16X_BY2 (1 << 8)
#define BXT_DSIA_16X_BY3 (2 << 8)
#define BXT_DSIA_16X_BY4 (3 << 8)
#define BXT_DSI_FREQ_SEL_SHIFT 8
#define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT)
#define BXT_DSI_PLL_RATIO_MAX 0x7D
#define BXT_DSI_PLL_RATIO_MIN 0x22
#define BXT_DSI_PLL_RATIO_MASK 0xFF
#define BXT_REF_CLOCK_KHZ 19500
#define BXT_DSI_PLL_ENABLE 0x46080
#define BXT_DSI_PLL_DO_ENABLE (1 << 31)
#define BXT_DSI_PLL_LOCKED (1 << 30)
#define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) #define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190)
#define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) #define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700)
#define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) #define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL)
@ -7916,6 +7960,11 @@ enum skl_disp_power_wells {
#define READ_REQUEST_PRIORITY_HIGH (3 << 3) #define READ_REQUEST_PRIORITY_HIGH (3 << 3)
#define RGB_FLIP_TO_BGR (1 << 2) #define RGB_FLIP_TO_BGR (1 << 2)
#define BXT_PIPE_SELECT_MASK (7 << 7)
#define BXT_PIPE_SELECT_C (2 << 7)
#define BXT_PIPE_SELECT_B (1 << 7)
#define BXT_PIPE_SELECT_A (0 << 7)
#define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108) #define _MIPIA_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb108)
#define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908) #define _MIPIC_DATA_ADDRESS (dev_priv->mipi_mmio_base + 0xb908)
#define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \ #define MIPI_DATA_ADDRESS(port) _MIPI_PORT(port, _MIPIA_DATA_ADDRESS, \

View File

@ -17,8 +17,8 @@
/* pipe updates */ /* pipe updates */
TRACE_EVENT(i915_pipe_update_start, TRACE_EVENT(i915_pipe_update_start,
TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max), TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc, min, max), TP_ARGS(crtc),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(enum pipe, pipe) __field(enum pipe, pipe)
@ -33,8 +33,8 @@ TRACE_EVENT(i915_pipe_update_start,
__entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev, __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
crtc->pipe); crtc->pipe);
__entry->scanline = intel_get_crtc_scanline(crtc); __entry->scanline = intel_get_crtc_scanline(crtc);
__entry->min = min; __entry->min = crtc->debug.min_vbl;
__entry->max = max; __entry->max = crtc->debug.max_vbl;
), ),
TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
@ -43,8 +43,8 @@ TRACE_EVENT(i915_pipe_update_start,
); );
TRACE_EVENT(i915_pipe_update_vblank_evaded, TRACE_EVENT(i915_pipe_update_vblank_evaded,
TP_PROTO(struct intel_crtc *crtc, u32 min, u32 max, u32 frame), TP_PROTO(struct intel_crtc *crtc),
TP_ARGS(crtc, min, max, frame), TP_ARGS(crtc),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(enum pipe, pipe) __field(enum pipe, pipe)
@ -56,10 +56,10 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded,
TP_fast_assign( TP_fast_assign(
__entry->pipe = crtc->pipe; __entry->pipe = crtc->pipe;
__entry->frame = frame; __entry->frame = crtc->debug.start_vbl_count;
__entry->scanline = intel_get_crtc_scanline(crtc); __entry->scanline = crtc->debug.scanline_start;
__entry->min = min; __entry->min = crtc->debug.min_vbl;
__entry->max = max; __entry->max = crtc->debug.max_vbl;
), ),
TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u", TP_printk("pipe %c, frame=%u, scanline=%u, min=%u, max=%u",
@ -68,8 +68,8 @@ TRACE_EVENT(i915_pipe_update_vblank_evaded,
); );
TRACE_EVENT(i915_pipe_update_end, TRACE_EVENT(i915_pipe_update_end,
TP_PROTO(struct intel_crtc *crtc, u32 frame), TP_PROTO(struct intel_crtc *crtc, u32 frame, int scanline_end),
TP_ARGS(crtc, frame), TP_ARGS(crtc, frame, scanline_end),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(enum pipe, pipe) __field(enum pipe, pipe)
@ -80,7 +80,7 @@ TRACE_EVENT(i915_pipe_update_end,
TP_fast_assign( TP_fast_assign(
__entry->pipe = crtc->pipe; __entry->pipe = crtc->pipe;
__entry->frame = frame; __entry->frame = frame;
__entry->scanline = intel_get_crtc_scanline(crtc); __entry->scanline = scanline_end;
), ),
TP_printk("pipe %c, frame=%u, scanline=%u", TP_printk("pipe %c, frame=%u, scanline=%u",

View File

@ -146,7 +146,7 @@ static bool intel_dsm_detect(void)
if (vga_count == 2 && has_dsm) { if (vga_count == 2 && has_dsm) {
acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", DRM_DEBUG_DRIVER("vga_switcheroo: detected DSM switching method %s handle\n",
acpi_method_name); acpi_method_name);
return true; return true;
} }

View File

@ -93,6 +93,8 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
crtc_state->update_pipe = false;
return &crtc_state->base; return &crtc_state->base;
} }

View File

@ -741,7 +741,6 @@ int intel_parse_bios(struct drm_device *dev);
*/ */
#define DEVICE_TYPE_eDP_BITS \ #define DEVICE_TYPE_eDP_BITS \
(DEVICE_TYPE_INTERNAL_CONNECTOR | \ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
DEVICE_TYPE_NOT_HDMI_OUTPUT | \
DEVICE_TYPE_MIPI_OUTPUT | \ DEVICE_TYPE_MIPI_OUTPUT | \
DEVICE_TYPE_COMPOSITE_OUTPUT | \ DEVICE_TYPE_COMPOSITE_OUTPUT | \
DEVICE_TYPE_DUAL_CHANNEL | \ DEVICE_TYPE_DUAL_CHANNEL | \
@ -749,7 +748,6 @@ int intel_parse_bios(struct drm_device *dev);
DEVICE_TYPE_TMDS_DVI_SIGNALING | \ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
DEVICE_TYPE_VIDEO_SIGNALING | \ DEVICE_TYPE_VIDEO_SIGNALING | \
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_DIGITAL_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT) DEVICE_TYPE_ANALOG_OUTPUT)
/* define the DVO port for HDMI output type */ /* define the DVO port for HDMI output type */

View File

@ -376,7 +376,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{ {
struct drm_device *dev = connector->dev; struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 hotplug_en, orig, stat; u32 stat;
bool ret = false; bool ret = false;
int i, tries = 0; int i, tries = 0;
@ -395,12 +395,12 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
tries = 2; tries = 2;
else else
tries = 1; tries = 1;
hotplug_en = orig = I915_READ(PORT_HOTPLUG_EN);
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
for (i = 0; i < tries ; i++) { for (i = 0; i < tries ; i++) {
/* turn on the FORCE_DETECT */ /* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); i915_hotplug_interrupt_update(dev_priv,
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */ /* wait for FORCE_DETECT to go off */
if (wait_for((I915_READ(PORT_HOTPLUG_EN) & if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT) == 0, CRT_HOTPLUG_FORCE_DETECT) == 0,
@ -415,8 +415,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
/* clear the interrupt we just generated, if any */ /* clear the interrupt we just generated, if any */
I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS); I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
/* and put the bits back */ i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
I915_WRITE(PORT_HOTPLUG_EN, orig);
return ret; return ret;
} }

View File

@ -42,13 +42,15 @@
*/ */
#define I915_CSR_SKL "i915/skl_dmc_ver1.bin" #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
#define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
MODULE_FIRMWARE(I915_CSR_SKL); MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
/* /*
* SKL CSR registers for DC5 and DC6 * SKL CSR registers for DC5 and DC6
*/ */
#define CSR_PROGRAM_BASE 0x80000 #define CSR_PROGRAM(i) (0x80000 + (i) * 4)
#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0 #define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
#define CSR_HTP_ADDR_SKL 0x00500034 #define CSR_HTP_ADDR_SKL 0x00500034
#define CSR_SSP_BASE 0x8F074 #define CSR_SSP_BASE 0x8F074
@ -181,11 +183,19 @@ static const struct stepping_info skl_stepping_info[] = {
{'G', '0'}, {'H', '0'}, {'I', '0'} {'G', '0'}, {'H', '0'}, {'I', '0'}
}; };
static struct stepping_info bxt_stepping_info[] = {
{'A', '0'}, {'A', '1'}, {'A', '2'},
{'B', '0'}, {'B', '1'}, {'B', '2'}
};
static char intel_get_stepping(struct drm_device *dev) static char intel_get_stepping(struct drm_device *dev)
{ {
if (IS_SKYLAKE(dev) && (dev->pdev->revision < if (IS_SKYLAKE(dev) && (dev->pdev->revision <
ARRAY_SIZE(skl_stepping_info))) ARRAY_SIZE(skl_stepping_info)))
return skl_stepping_info[dev->pdev->revision].stepping; return skl_stepping_info[dev->pdev->revision].stepping;
else if (IS_BROXTON(dev) && (dev->pdev->revision <
ARRAY_SIZE(bxt_stepping_info)))
return bxt_stepping_info[dev->pdev->revision].stepping;
else else
return -ENODATA; return -ENODATA;
} }
@ -195,6 +205,9 @@ static char intel_get_substepping(struct drm_device *dev)
if (IS_SKYLAKE(dev) && (dev->pdev->revision < if (IS_SKYLAKE(dev) && (dev->pdev->revision <
ARRAY_SIZE(skl_stepping_info))) ARRAY_SIZE(skl_stepping_info)))
return skl_stepping_info[dev->pdev->revision].substepping; return skl_stepping_info[dev->pdev->revision].substepping;
else if (IS_BROXTON(dev) && (dev->pdev->revision <
ARRAY_SIZE(bxt_stepping_info)))
return bxt_stepping_info[dev->pdev->revision].substepping;
else else
return -ENODATA; return -ENODATA;
} }
@ -255,8 +268,7 @@ void intel_csr_load_program(struct drm_device *dev)
mutex_lock(&dev_priv->csr_lock); mutex_lock(&dev_priv->csr_lock);
fw_size = dev_priv->csr.dmc_fw_size; fw_size = dev_priv->csr.dmc_fw_size;
for (i = 0; i < fw_size; i++) for (i = 0; i < fw_size; i++)
I915_WRITE(CSR_PROGRAM_BASE + i * 4, I915_WRITE(CSR_PROGRAM(i), payload[i]);
payload[i]);
for (i = 0; i < dev_priv->csr.mmio_count; i++) { for (i = 0; i < dev_priv->csr.mmio_count; i++) {
I915_WRITE(dev_priv->csr.mmioaddr[i], I915_WRITE(dev_priv->csr.mmioaddr[i],
@ -409,6 +421,8 @@ void intel_csr_ucode_init(struct drm_device *dev)
if (IS_SKYLAKE(dev)) if (IS_SKYLAKE(dev))
csr->fw_path = I915_CSR_SKL; csr->fw_path = I915_CSR_SKL;
else if (IS_BROXTON(dev_priv))
csr->fw_path = I915_CSR_BXT;
else { else {
DRM_ERROR("Unexpected: no known CSR firmware for platform\n"); DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
intel_csr_load_status_set(dev_priv, FW_FAILED); intel_csr_load_status_set(dev_priv, FW_FAILED);
@ -454,10 +468,10 @@ void intel_csr_ucode_fini(struct drm_device *dev)
void assert_csr_loaded(struct drm_i915_private *dev_priv) void assert_csr_loaded(struct drm_i915_private *dev_priv)
{ {
WARN(intel_csr_load_status_get(dev_priv) != FW_LOADED, WARN_ONCE(intel_csr_load_status_get(dev_priv) != FW_LOADED,
"CSR is not loaded.\n"); "CSR is not loaded.\n");
WARN(!I915_READ(CSR_PROGRAM_BASE), WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
"CSR program storage start is NULL\n"); "CSR program storage start is NULL\n");
WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n"); WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
WARN(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n"); WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
} }

View File

@ -414,7 +414,6 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bool supports_hdmi) bool supports_hdmi)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
u32 iboost_bit = 0; u32 iboost_bit = 0;
int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry,
size; size;
@ -505,11 +504,11 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
BUG(); BUG();
} }
for (i = 0, reg = DDI_BUF_TRANS(port); i < size; i++) { for (i = 0; i < size; i++) {
I915_WRITE(reg, ddi_translations[i].trans1 | iboost_bit); I915_WRITE(DDI_BUF_TRANS_LO(port, i),
reg += 4; ddi_translations[i].trans1 | iboost_bit);
I915_WRITE(reg, ddi_translations[i].trans2); I915_WRITE(DDI_BUF_TRANS_HI(port, i),
reg += 4; ddi_translations[i].trans2);
} }
if (!supports_hdmi) if (!supports_hdmi)
@ -521,10 +520,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
hdmi_level = hdmi_default_entry; hdmi_level = hdmi_default_entry;
/* Entry 9 is for HDMI: */ /* Entry 9 is for HDMI: */
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit); I915_WRITE(DDI_BUF_TRANS_LO(port, i),
reg += 4; ddi_translations_hdmi[hdmi_level].trans1 | iboost_bit);
I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans2); I915_WRITE(DDI_BUF_TRANS_HI(port, i),
reg += 4; ddi_translations_hdmi[hdmi_level].trans2);
} }
/* Program DDI buffers translations for DP. By default, program ports A-D in DP /* Program DDI buffers translations for DP. By default, program ports A-D in DP
@ -2882,7 +2881,7 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
* here just read out lanes 0/1 and output a note if lanes 2/3 differ. * here just read out lanes 0/1 and output a note if lanes 2/3 differ.
*/ */
hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port)); hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port));
if (I915_READ(BXT_PORT_PCS_DW12_LN23(port) != hw_state->pcsdw12)) if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12)
DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
hw_state->pcsdw12, hw_state->pcsdw12,
I915_READ(BXT_PORT_PCS_DW12_LN23(port))); I915_READ(BXT_PORT_PCS_DW12_LN23(port)));

View File

@ -112,6 +112,9 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
struct intel_crtc_state *crtc_state); struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
int num_connectors); int num_connectors);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev); static void intel_modeset_setup_hw_state(struct drm_device *dev);
typedef struct { typedef struct {
@ -2187,7 +2190,7 @@ static bool need_vtd_wa(struct drm_device *dev)
unsigned int unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format, intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
uint64_t fb_format_modifier) uint64_t fb_format_modifier, unsigned int plane)
{ {
unsigned int tile_height; unsigned int tile_height;
uint32_t pixel_bytes; uint32_t pixel_bytes;
@ -2203,7 +2206,7 @@ intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
tile_height = 32; tile_height = 32;
break; break;
case I915_FORMAT_MOD_Yf_TILED: case I915_FORMAT_MOD_Yf_TILED:
pixel_bytes = drm_format_plane_cpp(pixel_format, 0); pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
switch (pixel_bytes) { switch (pixel_bytes) {
default: default:
case 1: case 1:
@ -2237,7 +2240,7 @@ intel_fb_align_height(struct drm_device *dev, unsigned int height,
uint32_t pixel_format, uint64_t fb_format_modifier) uint32_t pixel_format, uint64_t fb_format_modifier)
{ {
return ALIGN(height, intel_tile_height(dev, pixel_format, return ALIGN(height, intel_tile_height(dev, pixel_format,
fb_format_modifier)); fb_format_modifier, 0));
} }
static int static int
@ -2260,15 +2263,27 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
info->height = fb->height; info->height = fb->height;
info->pixel_format = fb->pixel_format; info->pixel_format = fb->pixel_format;
info->pitch = fb->pitches[0]; info->pitch = fb->pitches[0];
info->uv_offset = fb->offsets[1];
info->fb_modifier = fb->modifier[0]; info->fb_modifier = fb->modifier[0];
tile_height = intel_tile_height(fb->dev, fb->pixel_format, tile_height = intel_tile_height(fb->dev, fb->pixel_format,
fb->modifier[0]); fb->modifier[0], 0);
tile_pitch = PAGE_SIZE / tile_height; tile_pitch = PAGE_SIZE / tile_height;
info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch); info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
info->height_pages = DIV_ROUND_UP(fb->height, tile_height); info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
info->size = info->width_pages * info->height_pages * PAGE_SIZE; info->size = info->width_pages * info->height_pages * PAGE_SIZE;
if (info->pixel_format == DRM_FORMAT_NV12) {
tile_height = intel_tile_height(fb->dev, fb->pixel_format,
fb->modifier[0], 1);
tile_pitch = PAGE_SIZE / tile_height;
info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
tile_height);
info->size_uv = info->width_pages_uv * info->height_pages_uv *
PAGE_SIZE;
}
return 0; return 0;
} }
@ -2727,6 +2742,9 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
(intel_crtc->config->pipe_src_w - 1) * pixel_size; (intel_crtc->config->pipe_src_w - 1) * pixel_size;
} }
intel_crtc->adjusted_x = x;
intel_crtc->adjusted_y = y;
I915_WRITE(reg, dspcntr); I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
@ -2827,6 +2845,9 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
} }
} }
intel_crtc->adjusted_x = x;
intel_crtc->adjusted_y = y;
I915_WRITE(reg, dspcntr); I915_WRITE(reg, dspcntr);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
@ -2876,14 +2897,29 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
} }
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj) struct drm_i915_gem_object *obj,
unsigned int plane)
{ {
const struct i915_ggtt_view *view = &i915_ggtt_view_normal; const struct i915_ggtt_view *view = &i915_ggtt_view_normal;
struct i915_vma *vma;
unsigned char *offset;
if (intel_rotation_90_or_270(intel_plane->base.state->rotation)) if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
view = &i915_ggtt_view_rotated; view = &i915_ggtt_view_rotated;
return i915_gem_obj_ggtt_offset_view(obj, view); vma = i915_gem_obj_to_ggtt_view(obj, view);
if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
view->type))
return -1;
offset = (unsigned char *)vma->node.start;
if (plane == 1) {
offset += vma->ggtt_view.rotation_info.uv_start_page *
PAGE_SIZE;
}
return (unsigned long)offset;
} }
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@ -3039,7 +3075,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
obj = intel_fb_obj(fb); obj = intel_fb_obj(fb);
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format); fb->pixel_format);
surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj); surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
/* /*
* FIXME: intel_plane_state->src, dst aren't set when transitional * FIXME: intel_plane_state->src, dst aren't set when transitional
@ -3066,7 +3102,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
if (intel_rotation_90_or_270(rotation)) { if (intel_rotation_90_or_270(rotation)) {
/* stride = Surface height in tiles */ /* stride = Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format, tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0]); fb->modifier[0], 0);
stride = DIV_ROUND_UP(fb->height, tile_height); stride = DIV_ROUND_UP(fb->height, tile_height);
x_offset = stride * tile_height - y - src_h; x_offset = stride * tile_height - y - src_h;
y_offset = x; y_offset = x;
@ -3079,6 +3115,9 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc,
} }
plane_offset = y_offset << 16 | x_offset; plane_offset = y_offset << 16 | x_offset;
intel_crtc->adjusted_x = x_offset;
intel_crtc->adjusted_y = y_offset;
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset); I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
I915_WRITE(PLANE_SIZE(pipe, 0), plane_size); I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
@ -3265,14 +3304,23 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
return pending; return pending;
} }
static void intel_update_pipe_size(struct intel_crtc *crtc) static void intel_update_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state)
{ {
struct drm_device *dev = crtc->base.dev; struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
const struct drm_display_mode *adjusted_mode; struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc->base.state);
if (!i915.fastboot) /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
return; crtc->base.mode = crtc->base.state->mode;
DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
if (HAS_DDI(dev))
intel_set_pipe_csc(&crtc->base);
/* /*
* Update pipe size and adjust fitter if needed: the reason for this is * Update pipe size and adjust fitter if needed: the reason for this is
@ -3281,27 +3329,24 @@ static void intel_update_pipe_size(struct intel_crtc *crtc)
* fastboot case, we'll flip, but if we don't update the pipesrc and * fastboot case, we'll flip, but if we don't update the pipesrc and
* pfit state, we'll end up with a big fb scanned out into the wrong * pfit state, we'll end up with a big fb scanned out into the wrong
* sized surface. * sized surface.
*
* To fix this properly, we need to hoist the checks up into
* compute_mode_changes (or above), check the actual pfit state and
* whether the platform allows pfit disable with pipe active, and only
* then update the pipesrc and pfit state, even on the flip path.
*/ */
adjusted_mode = &crtc->config->base.adjusted_mode;
I915_WRITE(PIPESRC(crtc->pipe), I915_WRITE(PIPESRC(crtc->pipe),
((adjusted_mode->crtc_hdisplay - 1) << 16) | ((pipe_config->pipe_src_w - 1) << 16) |
(adjusted_mode->crtc_vdisplay - 1)); (pipe_config->pipe_src_h - 1));
if (!crtc->config->pch_pfit.enabled &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || /* on skylake this is done by detaching scalers */
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { if (INTEL_INFO(dev)->gen >= 9) {
I915_WRITE(PF_CTL(crtc->pipe), 0); skl_detach_scalers(crtc);
I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
I915_WRITE(PF_WIN_SZ(crtc->pipe), 0); if (pipe_config->pch_pfit.enabled)
skylake_pfit_enable(crtc);
} else if (HAS_PCH_SPLIT(dev)) {
if (pipe_config->pch_pfit.enabled)
ironlake_pfit_enable(crtc);
else if (old_crtc_state->pch_pfit.enabled)
ironlake_pfit_disable(crtc, true);
} }
crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
} }
static void intel_fdi_normal_train(struct drm_crtc *crtc) static void intel_fdi_normal_train(struct drm_crtc *crtc)
@ -4958,7 +5003,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
} }
} }
static void ironlake_pfit_disable(struct intel_crtc *crtc) static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{ {
struct drm_device *dev = crtc->base.dev; struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
@ -4966,7 +5011,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc)
/* To avoid upsetting the power well on haswell only disable the pfit if /* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */ * it's in use. The hw state code will make sure we get this right. */
if (crtc->config->pch_pfit.enabled) { if (force || crtc->config->pch_pfit.enabled) {
I915_WRITE(PF_CTL(pipe), 0); I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_POS(pipe), 0); I915_WRITE(PF_WIN_POS(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0); I915_WRITE(PF_WIN_SZ(pipe), 0);
@ -4993,7 +5038,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_disable_pipe(intel_crtc); intel_disable_pipe(intel_crtc);
ironlake_pfit_disable(intel_crtc); ironlake_pfit_disable(intel_crtc, false);
if (intel_crtc->config->has_pch_encoder) if (intel_crtc->config->has_pch_encoder)
ironlake_fdi_disable(crtc); ironlake_fdi_disable(crtc);
@ -5056,7 +5101,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
if (INTEL_INFO(dev)->gen >= 9) if (INTEL_INFO(dev)->gen >= 9)
skylake_scaler_disable(intel_crtc); skylake_scaler_disable(intel_crtc);
else else
ironlake_pfit_disable(intel_crtc); ironlake_pfit_disable(intel_crtc, false);
intel_ddi_disable_pipe_clock(intel_crtc); intel_ddi_disable_pipe_clock(intel_crtc);
@ -11393,8 +11438,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret) if (ret)
goto cleanup_pending; goto cleanup_pending;
work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj) work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
+ intel_crtc->dspaddr_offset; obj, 0);
work->gtt_offset += intel_crtc->dspaddr_offset;
if (mmio_flip) { if (mmio_flip) {
ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
@ -12215,7 +12261,6 @@ static bool intel_fuzzy_clock_check(int clock1, int clock2)
base.head) \ base.head) \
if (mask & (1 <<(intel_crtc)->pipe)) if (mask & (1 <<(intel_crtc)->pipe))
static bool static bool
intel_compare_m_n(unsigned int m, unsigned int n, intel_compare_m_n(unsigned int m, unsigned int n,
unsigned int m2, unsigned int n2, unsigned int m2, unsigned int n2,
@ -12436,22 +12481,24 @@ intel_pipe_config_compare(struct drm_device *dev,
DRM_MODE_FLAG_NVSYNC); DRM_MODE_FLAG_NVSYNC);
} }
PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_I(pipe_src_h);
PIPE_CONF_CHECK_X(gmch_pfit.control); PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */ /* pfit ratios are autocomputed by the hw on gen4+ */
if (INTEL_INFO(dev)->gen < 4) if (INTEL_INFO(dev)->gen < 4)
PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
PIPE_CONF_CHECK_I(pch_pfit.enabled); if (!adjust) {
if (current_config->pch_pfit.enabled) { PIPE_CONF_CHECK_I(pipe_src_w);
PIPE_CONF_CHECK_X(pch_pfit.pos); PIPE_CONF_CHECK_I(pipe_src_h);
PIPE_CONF_CHECK_X(pch_pfit.size);
}
PIPE_CONF_CHECK_I(scaler_state.scaler_id); PIPE_CONF_CHECK_I(pch_pfit.enabled);
if (current_config->pch_pfit.enabled) {
PIPE_CONF_CHECK_X(pch_pfit.pos);
PIPE_CONF_CHECK_X(pch_pfit.size);
}
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
}
/* BDW+ don't expose a synchronous way to read the state */ /* BDW+ don't expose a synchronous way to read the state */
if (IS_HASWELL(dev)) if (IS_HASWELL(dev))
@ -12613,7 +12660,8 @@ check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
struct intel_crtc_state *pipe_config, *sw_config; struct intel_crtc_state *pipe_config, *sw_config;
bool active; bool active;
if (!needs_modeset(crtc->state)) if (!needs_modeset(crtc->state) &&
!to_intel_crtc_state(crtc->state)->update_pipe)
continue; continue;
__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state); __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
@ -12909,7 +12957,6 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
return ret; return ret;
} }
static int intel_modeset_checks(struct drm_atomic_state *state) static int intel_modeset_checks(struct drm_atomic_state *state)
{ {
struct drm_device *dev = state->dev; struct drm_device *dev = state->dev;
@ -12995,11 +13042,11 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret) if (ret)
return ret; return ret;
if (i915.fastboot && if (intel_pipe_config_compare(state->dev,
intel_pipe_config_compare(state->dev,
to_intel_crtc_state(crtc->state), to_intel_crtc_state(crtc->state),
pipe_config, true)) { pipe_config, true)) {
crtc_state->mode_changed = false; crtc_state->mode_changed = false;
to_intel_crtc_state(crtc_state)->update_pipe = true;
} }
if (needs_modeset(crtc_state)) { if (needs_modeset(crtc_state)) {
@ -13097,16 +13144,30 @@ static int intel_atomic_commit(struct drm_device *dev,
for_each_crtc_in_state(state, crtc, crtc_state, i) { for_each_crtc_in_state(state, crtc, crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bool modeset = needs_modeset(crtc->state); bool modeset = needs_modeset(crtc->state);
bool update_pipe = !modeset &&
to_intel_crtc_state(crtc->state)->update_pipe;
unsigned long put_domains = 0;
if (modeset && crtc->state->active) { if (modeset && crtc->state->active) {
update_scanline_offset(to_intel_crtc(crtc)); update_scanline_offset(to_intel_crtc(crtc));
dev_priv->display.crtc_enable(crtc); dev_priv->display.crtc_enable(crtc);
} }
if (update_pipe) {
put_domains = modeset_get_crtc_power_domains(crtc);
/* make sure intel_modeset_check_state runs */
any_ms = true;
}
if (!modeset) if (!modeset)
intel_pre_plane_update(intel_crtc); intel_pre_plane_update(intel_crtc);
drm_atomic_helper_commit_planes_on_crtc(crtc_state); drm_atomic_helper_commit_planes_on_crtc(crtc_state);
if (put_domains)
modeset_put_power_domains(dev_priv, put_domains);
intel_post_plane_update(intel_crtc); intel_post_plane_update(intel_crtc);
} }
@ -13422,10 +13483,6 @@ intel_commit_primary_plane(struct drm_plane *plane,
if (!crtc->state->active) if (!crtc->state->active)
return; return;
if (state->visible)
/* FIXME: kill this fastboot hack */
intel_update_pipe_size(intel_crtc);
dev_priv->display.update_primary_plane(crtc, fb, dev_priv->display.update_primary_plane(crtc, fb,
state->src.x1 >> 16, state->src.x1 >> 16,
state->src.y1 >> 16); state->src.y1 >> 16);
@ -13446,6 +13503,9 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
{ {
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_crtc_state *old_intel_state =
to_intel_crtc_state(old_crtc_state);
bool modeset = needs_modeset(crtc->state);
if (intel_crtc->atomic.update_wm_pre) if (intel_crtc->atomic.update_wm_pre)
intel_update_watermarks(crtc); intel_update_watermarks(crtc);
@ -13454,7 +13514,12 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
if (crtc->state->active) if (crtc->state->active)
intel_pipe_update_start(intel_crtc); intel_pipe_update_start(intel_crtc);
if (!needs_modeset(crtc->state) && INTEL_INFO(dev)->gen >= 9) if (modeset)
return;
if (to_intel_crtc_state(crtc->state)->update_pipe)
intel_update_pipe_config(intel_crtc, old_intel_state);
else if (INTEL_INFO(dev)->gen >= 9)
skl_detach_scalers(intel_crtc); skl_detach_scalers(intel_crtc);
} }
@ -14876,9 +14941,17 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
/* restore vblank interrupts to correct state */ /* restore vblank interrupts to correct state */
drm_crtc_vblank_reset(&crtc->base); drm_crtc_vblank_reset(&crtc->base);
if (crtc->active) { if (crtc->active) {
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); struct intel_plane *plane;
update_scanline_offset(crtc);
drm_crtc_vblank_on(&crtc->base); drm_crtc_vblank_on(&crtc->base);
/* Disable everything but the primary plane */
for_each_intel_plane_on_crtc(dev, crtc, plane) {
if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
continue;
plane->disable_plane(&plane->base, &crtc->base);
}
} }
/* We need to sanitize the plane -> pipe mapping first because this will /* We need to sanitize the plane -> pipe mapping first because this will
@ -15041,38 +15114,21 @@ void i915_redisable_vga(struct drm_device *dev)
i915_redisable_vga_power_on(dev); i915_redisable_vga_power_on(dev);
} }
static bool primary_get_hw_state(struct intel_crtc *crtc) static bool primary_get_hw_state(struct intel_plane *plane)
{ {
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
return !!(I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE); return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
} }
static void readout_plane_state(struct intel_crtc *crtc, /* FIXME read out full plane state for all planes */
struct intel_crtc_state *crtc_state) static void readout_plane_state(struct intel_crtc *crtc)
{ {
struct intel_plane *p; struct intel_plane_state *plane_state =
struct intel_plane_state *plane_state; to_intel_plane_state(crtc->base.primary->state);
bool active = crtc_state->base.active;
for_each_intel_plane(crtc->base.dev, p) { plane_state->visible =
if (crtc->pipe != p->pipe) primary_get_hw_state(to_intel_plane(crtc->base.primary));
continue;
plane_state = to_intel_plane_state(p->base.state);
if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
plane_state->visible = primary_get_hw_state(crtc);
if (plane_state->visible)
crtc->base.state->plane_mask |=
1 << drm_plane_index(&p->base);
} else {
if (active)
p->disable_plane(&p->base, &crtc->base);
plane_state->visible = false;
}
}
} }
static void intel_modeset_readout_hw_state(struct drm_device *dev) static void intel_modeset_readout_hw_state(struct drm_device *dev)
@ -15095,34 +15151,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->base.state->active = crtc->active; crtc->base.state->active = crtc->active;
crtc->base.enabled = crtc->active; crtc->base.enabled = crtc->active;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); readout_plane_state(crtc);
if (crtc->base.state->active) {
intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
/*
* The initial mode needs to be set in order to keep
* the atomic core happy. It wants a valid mode if the
* crtc's enabled, so we do the above call.
*
* At this point some state updated by the connectors
* in their ->detect() callback has not run yet, so
* no recalculation can be done yet.
*
* Even if we could do a recalculation and modeset
* right now it would cause a double modeset if
* fbdev or userspace chooses a different initial mode.
*
* If that happens, someone indicated they wanted a
* mode change, which means it's safe to do a full
* recalculation.
*/
crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
}
crtc->base.hwmode = crtc->config->base.adjusted_mode;
readout_plane_state(crtc, to_intel_crtc_state(crtc->base.state));
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id, crtc->base.base.id,
@ -15181,6 +15210,39 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.name, connector->base.name,
connector->base.encoder ? "enabled" : "disabled"); connector->base.encoder ? "enabled" : "disabled");
} }
for_each_intel_crtc(dev, crtc) {
crtc->base.hwmode = crtc->config->base.adjusted_mode;
memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
if (crtc->base.state->active) {
intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
/*
* The initial mode needs to be set in order to keep
* the atomic core happy. It wants a valid mode if the
* crtc's enabled, so we do the above call.
*
* At this point some state updated by the connectors
* in their ->detect() callback has not run yet, so
* no recalculation can be done yet.
*
* Even if we could do a recalculation and modeset
* right now it would cause a double modeset if
* fbdev or userspace chooses a different initial mode.
*
* If that happens, someone indicated they wanted a
* mode change, which means it's safe to do a full
* recalculation.
*/
crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
update_scanline_offset(crtc);
}
}
} }
/* Scan out the current hw modeset state, /* Scan out the current hw modeset state,

View File

@ -4583,6 +4583,9 @@ static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
case PORT_D: case PORT_D:
bit = SDE_PORTD_HOTPLUG_CPT; bit = SDE_PORTD_HOTPLUG_CPT;
break; break;
case PORT_E:
bit = SDE_PORTE_HOTPLUG_SPT;
break;
default: default:
MISSING_CASE(port->port); MISSING_CASE(port->port);
return false; return false;
@ -4638,11 +4641,14 @@ static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
} }
static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv, static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port) struct intel_digital_port *intel_dig_port)
{ {
struct intel_encoder *intel_encoder = &intel_dig_port->base;
enum port port;
u32 bit; u32 bit;
switch (port->port) { intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
switch (port) {
case PORT_A: case PORT_A:
bit = BXT_DE_PORT_HP_DDIA; bit = BXT_DE_PORT_HP_DDIA;
break; break;
@ -4653,7 +4659,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
bit = BXT_DE_PORT_HP_DDIC; bit = BXT_DE_PORT_HP_DDIC;
break; break;
default: default:
MISSING_CASE(port->port); MISSING_CASE(port);
return false; return false;
} }
@ -4667,7 +4673,7 @@ static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
* *
* Return %true if @port is connected, %false otherwise. * Return %true if @port is connected, %false otherwise.
*/ */
static bool intel_digital_port_connected(struct drm_i915_private *dev_priv, bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port) struct intel_digital_port *port)
{ {
if (HAS_PCH_IBX(dev_priv)) if (HAS_PCH_IBX(dev_priv))
@ -5250,6 +5256,13 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port)
[PORT_E] = DVO_PORT_DPE, [PORT_E] = DVO_PORT_DPE,
}; };
/*
* eDP not supported on g4x. so bail out early just
* for a bit extra safety in case the VBT is bonkers.
*/
if (INTEL_INFO(dev)->gen < 5)
return false;
if (port == PORT_A) if (port == PORT_A)
return true; return true;

View File

@ -338,6 +338,8 @@ struct intel_crtc_state {
#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
unsigned long quirks; unsigned long quirks;
bool update_pipe;
/* Pipe source size (ie. panel fitter input size) /* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space, * All planes will be positioned inside this space,
* and get clipped at the edges. */ * and get clipped at the edges. */
@ -535,6 +537,8 @@ struct intel_crtc {
* gen4+ this only adjusts up to a tile, offsets within a tile are * gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */ * handled in the hw itself (with the TILEOFF register). */
unsigned long dspaddr_offset; unsigned long dspaddr_offset;
int adjusted_x;
int adjusted_y;
struct drm_i915_gem_object *cursor_bo; struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr; uint32_t cursor_addr;
@ -563,8 +567,12 @@ struct intel_crtc {
int scanline_offset; int scanline_offset;
unsigned start_vbl_count; struct {
ktime_t start_vbl_time; unsigned start_vbl_count;
ktime_t start_vbl_time;
int min_vbl, max_vbl;
int scanline_start;
} debug;
struct intel_crtc_atomic_commit atomic; struct intel_crtc_atomic_commit atomic;
@ -1079,7 +1087,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
unsigned int unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format, intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
uint64_t fb_format_modifier); uint64_t fb_format_modifier, unsigned int plane);
static inline bool static inline bool
intel_rotation_90_or_270(unsigned int rotation) intel_rotation_90_or_270(unsigned int rotation)
@ -1160,7 +1168,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj); struct drm_i915_gem_object *obj,
unsigned int plane);
u32 skl_plane_ctl_format(uint32_t pixel_format); u32 skl_plane_ctl_format(uint32_t pixel_format);
u32 skl_plane_ctl_tiling(uint64_t fb_modifier); u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
u32 skl_plane_ctl_rotation(unsigned int rotation); u32 skl_plane_ctl_rotation(unsigned int rotation);
@ -1210,6 +1220,8 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp);
void intel_edp_drrs_invalidate(struct drm_device *dev, void intel_edp_drrs_invalidate(struct drm_device *dev,
unsigned frontbuffer_bits); unsigned frontbuffer_bits);
void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits); void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config); void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config);
/* intel_dp_mst.c */ /* intel_dp_mst.c */

View File

@ -557,7 +557,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
usleep_range(2000, 2500); usleep_range(2000, 2500);
} }
vlv_disable_dsi_pll(encoder); intel_disable_dsi_pll(encoder);
} }
static void intel_dsi_post_disable(struct intel_encoder *encoder) static void intel_dsi_post_disable(struct intel_encoder *encoder)
@ -737,6 +737,21 @@ static void set_dsi_timings(struct drm_encoder *encoder,
hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio);
for_each_dsi_port(port, intel_dsi->ports) { for_each_dsi_port(port, intel_dsi->ports) {
if (IS_BROXTON(dev)) {
/*
* Program hdisplay and vdisplay on MIPI transcoder.
* This is different from calculated hactive and
* vactive, as they are calculated per channel basis,
* whereas these values should be based on resolution.
*/
I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port),
mode->hdisplay);
I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port),
mode->vdisplay);
I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port),
mode->vtotal);
}
I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive); I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
I915_WRITE(MIPI_HFP_COUNT(port), hfp); I915_WRITE(MIPI_HFP_COUNT(port), hfp);
@ -777,16 +792,39 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
} }
for_each_dsi_port(port, intel_dsi->ports) { for_each_dsi_port(port, intel_dsi->ports) {
/* escape clock divider, 20MHz, shared for A and C. if (IS_VALLEYVIEW(dev)) {
* device ready must be off when doing this! txclkesc? */ /*
tmp = I915_READ(MIPI_CTRL(PORT_A)); * escape clock divider, 20MHz, shared for A and C.
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK; * device ready must be off when doing this! txclkesc?
I915_WRITE(MIPI_CTRL(PORT_A), tmp | ESCAPE_CLOCK_DIVIDER_1); */
tmp = I915_READ(MIPI_CTRL(PORT_A));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
I915_WRITE(MIPI_CTRL(PORT_A), tmp |
ESCAPE_CLOCK_DIVIDER_1);
/* read request priority is per pipe */ /* read request priority is per pipe */
tmp = I915_READ(MIPI_CTRL(port)); tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~READ_REQUEST_PRIORITY_MASK; tmp &= ~READ_REQUEST_PRIORITY_MASK;
I915_WRITE(MIPI_CTRL(port), tmp | READ_REQUEST_PRIORITY_HIGH); I915_WRITE(MIPI_CTRL(port), tmp |
READ_REQUEST_PRIORITY_HIGH);
} else if (IS_BROXTON(dev)) {
/*
* FIXME:
* BXT can connect any PIPE to any MIPI port.
* Select the pipe based on the MIPI port read from
* VBT for now. Pick PIPE A for MIPI port A and C
* for port C.
*/
tmp = I915_READ(MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
if (port == PORT_A)
tmp |= BXT_PIPE_SELECT_A;
else if (port == PORT_C)
tmp |= BXT_PIPE_SELECT_C;
I915_WRITE(MIPI_CTRL(port), tmp);
}
/* XXX: why here, why like this? handling in irq handler?! */ /* XXX: why here, why like this? handling in irq handler?! */
I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff); I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
@ -863,6 +901,17 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
I915_WRITE(MIPI_INIT_COUNT(port), I915_WRITE(MIPI_INIT_COUNT(port),
txclkesc(intel_dsi->escape_clk_div, 100)); txclkesc(intel_dsi->escape_clk_div, 100));
if (IS_BROXTON(dev) && (!intel_dsi->dual_link)) {
/*
* BXT spec says write MIPI_INIT_COUNT for
* both the ports, even if only one is
* getting used. So write the other port
* if not in dual link mode.
*/
I915_WRITE(MIPI_INIT_COUNT(port ==
PORT_A ? PORT_C : PORT_A),
intel_dsi->init_count);
}
/* recovery disables */ /* recovery disables */
I915_WRITE(MIPI_EOT_DISABLE(port), tmp); I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
@ -914,8 +963,8 @@ static void intel_dsi_pre_pll_enable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("\n"); DRM_DEBUG_KMS("\n");
intel_dsi_prepare(encoder); intel_dsi_prepare(encoder);
intel_enable_dsi_pll(encoder);
vlv_enable_dsi_pll(encoder);
} }
static enum drm_connector_status static enum drm_connector_status

View File

@ -124,8 +124,8 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base); return container_of(encoder, struct intel_dsi, base.base);
} }
extern void vlv_enable_dsi_pll(struct intel_encoder *encoder); extern void intel_enable_dsi_pll(struct intel_encoder *encoder);
extern void vlv_disable_dsi_pll(struct intel_encoder *encoder); extern void intel_disable_dsi_pll(struct intel_encoder *encoder);
extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp);
struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id); struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);

View File

@ -246,7 +246,7 @@ static void vlv_configure_dsi_pll(struct intel_encoder *encoder)
vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl); vlv_cck_write(dev_priv, CCK_REG_DSI_PLL_CONTROL, dsi_mnp.dsi_pll_ctrl);
} }
void vlv_enable_dsi_pll(struct intel_encoder *encoder) static void vlv_enable_dsi_pll(struct intel_encoder *encoder)
{ {
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp; u32 tmp;
@ -276,7 +276,7 @@ void vlv_enable_dsi_pll(struct intel_encoder *encoder)
DRM_DEBUG_KMS("DSI PLL locked\n"); DRM_DEBUG_KMS("DSI PLL locked\n");
} }
void vlv_disable_dsi_pll(struct intel_encoder *encoder) static void vlv_disable_dsi_pll(struct intel_encoder *encoder)
{ {
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 tmp; u32 tmp;
@ -293,6 +293,26 @@ void vlv_disable_dsi_pll(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->sb_lock); mutex_unlock(&dev_priv->sb_lock);
} }
static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 val;
DRM_DEBUG_KMS("\n");
val = I915_READ(BXT_DSI_PLL_ENABLE);
val &= ~BXT_DSI_PLL_DO_ENABLE;
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/*
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE)
& BXT_DSI_PLL_LOCKED) == 0, 1))
DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
}
static void assert_bpp_mismatch(int pixel_format, int pipe_bpp) static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
{ {
int bpp = dsi_pixel_format_bpp(pixel_format); int bpp = dsi_pixel_format_bpp(pixel_format);
@ -363,3 +383,106 @@ u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp)
return pclk; return pclk;
} }
static bool bxt_configure_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
u8 dsi_ratio;
u32 dsi_clk;
u32 val;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
intel_dsi->lane_count);
/*
* From clock diagram, to get PLL ratio divider, divide double of DSI
* link rate (i.e., 2*8x=16x frequency value) by ref clock. Make sure to
* round 'up' the result
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
return false;
}
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
* Spec says both have to be programmed, even if one is not getting
* used. Configure MIPI_CLOCK_CTL dividers in modeset
*/
val = I915_READ(BXT_DSI_PLL_CTL);
val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
val &= ~BXT_DSI_FREQ_SEL_MASK;
val &= ~BXT_DSI_PLL_RATIO_MASK;
val |= (dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2);
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
*/
if (dsi_ratio <= 50) {
val &= ~BXT_DSI_PLL_PVD_RATIO_MASK;
val |= BXT_DSI_PLL_PVD_RATIO_1;
}
I915_WRITE(BXT_DSI_PLL_CTL, val);
POSTING_READ(BXT_DSI_PLL_CTL);
return true;
}
static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
u32 val;
DRM_DEBUG_KMS("\n");
val = I915_READ(BXT_DSI_PLL_ENABLE);
if (val & BXT_DSI_PLL_DO_ENABLE) {
WARN(1, "DSI PLL already enabled. Disabling it.\n");
val &= ~BXT_DSI_PLL_DO_ENABLE;
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
}
/* Configure PLL vales */
if (!bxt_configure_dsi_pll(encoder)) {
DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
return;
}
/* Enable DSI PLL */
val = I915_READ(BXT_DSI_PLL_ENABLE);
val |= BXT_DSI_PLL_DO_ENABLE;
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) {
DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
return;
}
DRM_DEBUG_KMS("DSI PLL locked\n");
}
void intel_enable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
if (IS_VALLEYVIEW(dev))
vlv_enable_dsi_pll(encoder);
else if (IS_BROXTON(dev))
bxt_enable_dsi_pll(encoder);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
if (IS_VALLEYVIEW(dev))
vlv_disable_dsi_pll(encoder);
else if (IS_BROXTON(dev))
bxt_disable_dsi_pll(encoder);
}

View File

@ -41,6 +41,19 @@
#include "intel_drv.h" #include "intel_drv.h"
#include "i915_drv.h" #include "i915_drv.h"
/*
* In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
* origin so the x and y offsets can actually fit the registers. As a
* consequence, the fence doesn't really start exactly at the display plane
* address we program because it starts at the real start of the buffer, so we
* have to take this into consideration here.
*/
static unsigned int get_crtc_fence_y_offset(struct intel_crtc *crtc)
{
return crtc->base.y - crtc->adjusted_y;
}
static void i8xx_fbc_disable(struct drm_i915_private *dev_priv) static void i8xx_fbc_disable(struct drm_i915_private *dev_priv)
{ {
u32 fbc_ctl; u32 fbc_ctl;
@ -88,7 +101,7 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
/* Clear old tags */ /* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
I915_WRITE(FBC_TAG + (i * 4), 0); I915_WRITE(FBC_TAG(i), 0);
if (IS_GEN4(dev_priv)) { if (IS_GEN4(dev_priv)) {
u32 fbc_ctl2; u32 fbc_ctl2;
@ -97,7 +110,7 @@ static void i8xx_fbc_enable(struct intel_crtc *crtc)
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane); fbc_ctl2 |= FBC_CTL_PLANE(crtc->plane);
I915_WRITE(FBC_CONTROL2, fbc_ctl2); I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->base.y); I915_WRITE(FBC_FENCE_OFF, get_crtc_fence_y_offset(crtc));
} }
/* enable it... */ /* enable it... */
@ -135,7 +148,7 @@ static void g4x_fbc_enable(struct intel_crtc *crtc)
dpfc_ctl |= DPFC_CTL_LIMIT_1X; dpfc_ctl |= DPFC_CTL_LIMIT_1X;
dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
I915_WRITE(DPFC_FENCE_YOFF, crtc->base.y); I915_WRITE(DPFC_FENCE_YOFF, get_crtc_fence_y_offset(crtc));
/* enable it... */ /* enable it... */
I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@ -177,6 +190,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 dpfc_ctl; u32 dpfc_ctl;
int threshold = dev_priv->fbc.threshold; int threshold = dev_priv->fbc.threshold;
unsigned int y_offset;
dev_priv->fbc.enabled = true; dev_priv->fbc.enabled = true;
@ -200,7 +214,8 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
if (IS_GEN5(dev_priv)) if (IS_GEN5(dev_priv))
dpfc_ctl |= obj->fence_reg; dpfc_ctl |= obj->fence_reg;
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->base.y); y_offset = get_crtc_fence_y_offset(crtc);
I915_WRITE(ILK_DPFC_FENCE_YOFF, y_offset);
I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID); I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */ /* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@ -208,7 +223,7 @@ static void ilk_fbc_enable(struct intel_crtc *crtc)
if (IS_GEN6(dev_priv)) { if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA, I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg); SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y); I915_WRITE(DPFC_CPU_FENCE_OFFSET, y_offset);
} }
intel_fbc_nuke(dev_priv); intel_fbc_nuke(dev_priv);
@ -272,23 +287,23 @@ static void gen7_fbc_enable(struct intel_crtc *crtc)
if (dev_priv->fbc.false_color) if (dev_priv->fbc.false_color)
dpfc_ctl |= FBC_CTL_FALSE_COLOR; dpfc_ctl |= FBC_CTL_FALSE_COLOR;
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
if (IS_IVYBRIDGE(dev_priv)) { if (IS_IVYBRIDGE(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */ /* WaFbcAsynchFlipDisableFbcQueue:ivb */
I915_WRITE(ILK_DISPLAY_CHICKEN1, I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) | I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS); ILK_FBCQ_DIS);
} else { } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe), I915_WRITE(CHICKEN_PIPESL_1(crtc->pipe),
I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) | I915_READ(CHICKEN_PIPESL_1(crtc->pipe)) |
HSW_FBCQ_DIS); HSW_FBCQ_DIS);
} }
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
I915_WRITE(SNB_DPFC_CTL_SA, I915_WRITE(SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | obj->fence_reg); SNB_CPU_FENCE_ENABLE | obj->fence_reg);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->base.y); I915_WRITE(DPFC_CPU_FENCE_OFFSET, get_crtc_fence_y_offset(crtc));
intel_fbc_nuke(dev_priv); intel_fbc_nuke(dev_priv);
@ -308,6 +323,18 @@ bool intel_fbc_enabled(struct drm_i915_private *dev_priv)
return dev_priv->fbc.enabled; return dev_priv->fbc.enabled;
} }
static void intel_fbc_enable(struct intel_crtc *crtc,
const struct drm_framebuffer *fb)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
dev_priv->fbc.enable_fbc(crtc);
dev_priv->fbc.crtc = crtc;
dev_priv->fbc.fb_id = fb->base.id;
dev_priv->fbc.y = crtc->base.y;
}
static void intel_fbc_work_fn(struct work_struct *__work) static void intel_fbc_work_fn(struct work_struct *__work)
{ {
struct intel_fbc_work *work = struct intel_fbc_work *work =
@ -321,13 +348,8 @@ static void intel_fbc_work_fn(struct work_struct *__work)
/* Double check that we haven't switched fb without cancelling /* Double check that we haven't switched fb without cancelling
* the prior work. * the prior work.
*/ */
if (crtc_fb == work->fb) { if (crtc_fb == work->fb)
dev_priv->fbc.enable_fbc(work->crtc); intel_fbc_enable(work->crtc, work->fb);
dev_priv->fbc.crtc = work->crtc;
dev_priv->fbc.fb_id = crtc_fb->base.id;
dev_priv->fbc.y = work->crtc->base.y;
}
dev_priv->fbc.fbc_work = NULL; dev_priv->fbc.fbc_work = NULL;
} }
@ -361,7 +383,7 @@ static void intel_fbc_cancel_work(struct drm_i915_private *dev_priv)
dev_priv->fbc.fbc_work = NULL; dev_priv->fbc.fbc_work = NULL;
} }
static void intel_fbc_enable(struct intel_crtc *crtc) static void intel_fbc_schedule_enable(struct intel_crtc *crtc)
{ {
struct intel_fbc_work *work; struct intel_fbc_work *work;
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@ -373,7 +395,7 @@ static void intel_fbc_enable(struct intel_crtc *crtc)
work = kzalloc(sizeof(*work), GFP_KERNEL); work = kzalloc(sizeof(*work), GFP_KERNEL);
if (work == NULL) { if (work == NULL) {
DRM_ERROR("Failed to allocate FBC work structure\n"); DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->fbc.enable_fbc(crtc); intel_fbc_enable(crtc, crtc->base.primary->fb);
return; return;
} }
@ -473,6 +495,12 @@ const char *intel_no_fbc_reason_str(enum no_fbc_reason reason)
return "rotation unsupported"; return "rotation unsupported";
case FBC_IN_DBG_MASTER: case FBC_IN_DBG_MASTER:
return "Kernel debugger is active"; return "Kernel debugger is active";
case FBC_BAD_STRIDE:
return "framebuffer stride not supported";
case FBC_PIXEL_RATE:
return "pixel rate is too big";
case FBC_PIXEL_FORMAT:
return "pixel format is invalid";
default: default:
MISSING_CASE(reason); MISSING_CASE(reason);
return "unknown reason"; return "unknown reason";
@ -542,6 +570,16 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
{ {
int compression_threshold = 1; int compression_threshold = 1;
int ret; int ret;
u64 end;
/* The FBC hardware for BDW/SKL doesn't have access to the stolen
* reserved range size, so it always assumes the maximum (8mb) is used.
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
else
end = dev_priv->gtt.stolen_usable_size;
/* HACK: This code depends on what we will do in *_enable_fbc. If that /* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well. * code changes, this code needs to change as well.
@ -551,7 +589,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
*/ */
/* Try to over-allocate to reduce reallocations and fragmentation. */ /* Try to over-allocate to reduce reallocations and fragmentation. */
ret = i915_gem_stolen_insert_node(dev_priv, node, size <<= 1, 4096); ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size <<= 1,
4096, 0, end);
if (ret == 0) if (ret == 0)
return compression_threshold; return compression_threshold;
@ -561,7 +600,8 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
(fb_cpp == 2 && compression_threshold == 2)) (fb_cpp == 2 && compression_threshold == 2))
return 0; return 0;
ret = i915_gem_stolen_insert_node(dev_priv, node, size >>= 1, 4096); ret = i915_gem_stolen_insert_node_in_range(dev_priv, node, size >>= 1,
4096, 0, end);
if (ret && INTEL_INFO(dev_priv)->gen <= 4) { if (ret && INTEL_INFO(dev_priv)->gen <= 4) {
return 0; return 0;
} else if (ret) { } else if (ret) {
@ -613,8 +653,9 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv, int size,
dev_priv->fbc.uncompressed_size = size; dev_priv->fbc.uncompressed_size = size;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
size); dev_priv->fbc.compressed_fb.size,
dev_priv->fbc.threshold);
return 0; return 0;
@ -664,6 +705,50 @@ static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size,
return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp); return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp);
} }
static bool stride_is_valid(struct drm_i915_private *dev_priv,
unsigned int stride)
{
/* These should have been caught earlier. */
WARN_ON(stride < 512);
WARN_ON((stride & (64 - 1)) != 0);
/* Below are the additional FBC restrictions. */
if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
return stride == 4096 || stride == 8192;
if (IS_GEN4(dev_priv) && !IS_G4X(dev_priv) && stride < 2048)
return false;
if (stride > 16384)
return false;
return true;
}
static bool pixel_format_is_valid(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
switch (fb->pixel_format) {
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
return true;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_RGB565:
/* 16bpp not supported on gen2 */
if (IS_GEN2(dev))
return false;
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
return false;
return true;
default:
return false;
}
}
/** /**
* __intel_fbc_update - enable/disable FBC as needed, unlocked * __intel_fbc_update - enable/disable FBC as needed, unlocked
* @dev_priv: i915 device instance * @dev_priv: i915 device instance
@ -774,12 +859,30 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
goto out_disable; goto out_disable;
} }
if (!stride_is_valid(dev_priv, fb->pitches[0])) {
set_no_fbc_reason(dev_priv, FBC_BAD_STRIDE);
goto out_disable;
}
if (!pixel_format_is_valid(fb)) {
set_no_fbc_reason(dev_priv, FBC_PIXEL_FORMAT);
goto out_disable;
}
/* If the kernel debugger is active, always disable compression */ /* If the kernel debugger is active, always disable compression */
if (in_dbg_master()) { if (in_dbg_master()) {
set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER); set_no_fbc_reason(dev_priv, FBC_IN_DBG_MASTER);
goto out_disable; goto out_disable;
} }
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
ilk_pipe_pixel_rate(intel_crtc->config) >=
dev_priv->cdclk_freq * 95 / 100) {
set_no_fbc_reason(dev_priv, FBC_PIXEL_RATE);
goto out_disable;
}
if (intel_fbc_setup_cfb(dev_priv, obj->base.size, if (intel_fbc_setup_cfb(dev_priv, obj->base.size,
drm_format_plane_cpp(fb->pixel_format, 0))) { drm_format_plane_cpp(fb->pixel_format, 0))) {
set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL); set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL);
@ -824,7 +927,7 @@ static void __intel_fbc_update(struct drm_i915_private *dev_priv)
__intel_fbc_disable(dev_priv); __intel_fbc_disable(dev_priv);
} }
intel_fbc_enable(intel_crtc); intel_fbc_schedule_enable(intel_crtc);
dev_priv->fbc.no_fbc_reason = FBC_OK; dev_priv->fbc.no_fbc_reason = FBC_OK;
return; return;

View File

@ -541,16 +541,13 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
struct intel_crtc *intel_crtc; struct intel_crtc *intel_crtc;
unsigned int max_size = 0; unsigned int max_size = 0;
if (!i915.fastboot)
return false;
/* Find the largest fb */ /* Find the largest fb */
for_each_crtc(dev, crtc) { for_each_crtc(dev, crtc) {
struct drm_i915_gem_object *obj = struct drm_i915_gem_object *obj =
intel_fb_obj(crtc->primary->state->fb); intel_fb_obj(crtc->primary->state->fb);
intel_crtc = to_intel_crtc(crtc); intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active || !obj) { if (!crtc->state->active || !obj) {
DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n", DRM_DEBUG_KMS("pipe %c not active or no fb, skipping\n",
pipe_name(intel_crtc->pipe)); pipe_name(intel_crtc->pipe));
continue; continue;
@ -575,7 +572,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
intel_crtc = to_intel_crtc(crtc); intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active) { if (!crtc->state->active) {
DRM_DEBUG_KMS("pipe %c not active, skipping\n", DRM_DEBUG_KMS("pipe %c not active, skipping\n",
pipe_name(intel_crtc->pipe)); pipe_name(intel_crtc->pipe));
continue; continue;
@ -638,7 +635,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
for_each_crtc(dev, crtc) { for_each_crtc(dev, crtc) {
intel_crtc = to_intel_crtc(crtc); intel_crtc = to_intel_crtc(crtc);
if (!intel_crtc->active) if (!crtc->state->active)
continue; continue;
WARN(!crtc->primary->fb, WARN(!crtc->primary->fb,

View File

@ -330,6 +330,13 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
/* Enable MIA caching. GuC clock gating is disabled. */ /* Enable MIA caching. GuC clock gating is disabled. */
I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE); I915_WRITE(GUC_SHIM_CONTROL, GUC_SHIM_CONTROL_VALUE);
/* WaDisableMinuteIaClockGating:skl,bxt */
if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) {
I915_WRITE(GUC_SHIM_CONTROL, (I915_READ(GUC_SHIM_CONTROL) &
~GUC_ENABLE_MIA_CLOCK_GATING));
}
/* WaC6DisallowByGfxPause*/ /* WaC6DisallowByGfxPause*/
I915_WRITE(GEN6_GFXPAUSE, 0x30FFF); I915_WRITE(GEN6_GFXPAUSE, 0x30FFF);

View File

@ -1329,22 +1329,23 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
} }
static bool static bool
intel_hdmi_set_edid(struct drm_connector *connector) intel_hdmi_set_edid(struct drm_connector *connector, bool force)
{ {
struct drm_i915_private *dev_priv = to_i915(connector->dev); struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_encoder *intel_encoder = struct intel_encoder *intel_encoder =
&hdmi_to_dig_port(intel_hdmi)->base; &hdmi_to_dig_port(intel_hdmi)->base;
enum intel_display_power_domain power_domain; enum intel_display_power_domain power_domain;
struct edid *edid; struct edid *edid = NULL;
bool connected = false; bool connected = false;
power_domain = intel_display_port_power_domain(intel_encoder); power_domain = intel_display_port_power_domain(intel_encoder);
intel_display_power_get(dev_priv, power_domain); intel_display_power_get(dev_priv, power_domain);
edid = drm_get_edid(connector, if (force)
intel_gmbus_get_adapter(dev_priv, edid = drm_get_edid(connector,
intel_hdmi->ddc_bus)); intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
intel_display_power_put(dev_priv, power_domain); intel_display_power_put(dev_priv, power_domain);
@ -1372,13 +1373,26 @@ static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force) intel_hdmi_detect(struct drm_connector *connector, bool force)
{ {
enum drm_connector_status status; enum drm_connector_status status;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
bool live_status = false;
unsigned int retry = 3;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name); connector->base.id, connector->name);
while (!live_status && --retry) {
live_status = intel_digital_port_connected(dev_priv,
hdmi_to_dig_port(intel_hdmi));
mdelay(10);
}
if (!live_status)
DRM_DEBUG_KMS("Live status not up!");
intel_hdmi_unset_edid(connector); intel_hdmi_unset_edid(connector);
if (intel_hdmi_set_edid(connector)) { if (intel_hdmi_set_edid(connector, live_status)) {
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
@ -1402,7 +1416,7 @@ intel_hdmi_force(struct drm_connector *connector)
if (connector->status != connector_status_connected) if (connector->status != connector_status_connected)
return; return;
intel_hdmi_set_edid(connector); intel_hdmi_set_edid(connector, true);
hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI; hdmi_to_dig_port(intel_hdmi)->base.type = INTEL_OUTPUT_HDMI;
} }

View File

@ -221,6 +221,9 @@ enum {
#define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 #define CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
static int intel_lr_context_pin(struct drm_i915_gem_request *rq); static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
struct drm_i915_gem_object *default_ctx_obj);
/** /**
* intel_sanitize_enable_execlists() - sanitize i915.enable_execlists * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
@ -277,10 +280,18 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
return lrca >> 12; return lrca >> 12;
} }
static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
(ring->id == VCS || ring->id == VCS2);
}
uint64_t intel_lr_context_descriptor(struct intel_context *ctx, uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
struct intel_engine_cs *ring) struct intel_engine_cs *ring)
{ {
struct drm_device *dev = ring->dev;
struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
uint64_t desc; uint64_t desc;
uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) + uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
@ -301,10 +312,8 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
/* desc |= GEN8_CTX_FORCE_RESTORE; */ /* desc |= GEN8_CTX_FORCE_RESTORE; */
/* WaEnableForceRestoreInCtxtDescForVCS:skl */ /* WaEnableForceRestoreInCtxtDescForVCS:skl */
if (IS_GEN9(dev) && /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
INTEL_REVID(dev) <= SKL_REVID_B0 && if (disable_lite_restore_wa(ring))
(ring->id == BCS || ring->id == VCS ||
ring->id == VECS || ring->id == VCS2))
desc |= GEN8_CTX_FORCE_RESTORE; desc |= GEN8_CTX_FORCE_RESTORE;
return desc; return desc;
@ -340,7 +349,7 @@ static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0])); I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0]));
/* ELSP is a wo register, use another nearby reg for posting */ /* ELSP is a wo register, use another nearby reg for posting */
POSTING_READ_FW(RING_EXECLIST_STATUS(ring)); POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring));
intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
spin_unlock(&dev_priv->uncore.lock); spin_unlock(&dev_priv->uncore.lock);
} }
@ -495,7 +504,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
u32 status_pointer; u32 status_pointer;
u8 read_pointer; u8 read_pointer;
u8 write_pointer; u8 write_pointer;
u32 status; u32 status = 0;
u32 status_id; u32 status_id;
u32 submit_contexts = 0; u32 submit_contexts = 0;
@ -510,10 +519,8 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
while (read_pointer < write_pointer) { while (read_pointer < write_pointer) {
read_pointer++; read_pointer++;
status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) + status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer % 6));
(read_pointer % 6) * 8); status_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer % 6));
status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
(read_pointer % 6) * 8 + 4);
if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
continue; continue;
@ -533,8 +540,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
} }
} }
if (submit_contexts != 0) if (disable_lite_restore_wa(ring)) {
/* Prevent a ctx to preempt itself */
if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
(submit_contexts != 0))
execlists_context_unqueue(ring);
} else if (submit_contexts != 0) {
execlists_context_unqueue(ring); execlists_context_unqueue(ring);
}
spin_unlock(&ring->execlist_lock); spin_unlock(&ring->execlist_lock);
@ -787,8 +800,7 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
/** /**
* intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
* *
* @request: The request to start some new work for * @req: The request to start some new work for
* @ctx: Logical ring context whose ringbuffer is being prepared.
* @num_dwords: number of DWORDs that we plan to write to the ringbuffer. * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
* *
* The ringbuffer might not be ready to accept the commands right away (maybe it needs to * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
@ -1008,39 +1020,54 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
return 0; return 0;
} }
static int intel_lr_context_pin(struct drm_i915_gem_request *rq) static int intel_lr_context_do_pin(struct intel_engine_cs *ring,
struct drm_i915_gem_object *ctx_obj,
struct intel_ringbuffer *ringbuf)
{ {
struct drm_i915_private *dev_priv = rq->i915; struct drm_device *dev = ring->dev;
struct intel_engine_cs *ring = rq->ring; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
int ret = 0; int ret = 0;
WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
if (rq->ctx->engine[ring->id].pin_count++ == 0) { ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
PIN_OFFSET_BIAS | GUC_WOPCM_TOP); if (ret)
if (ret) return ret;
goto reset_pin_count;
ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
if (ret) if (ret)
goto unpin_ctx_obj; goto unpin_ctx_obj;
ctx_obj->dirty = true; ctx_obj->dirty = true;
/* Invalidate GuC TLB. */ /* Invalidate GuC TLB. */
if (i915.enable_guc_submission) if (i915.enable_guc_submission)
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
return ret; return ret;
unpin_ctx_obj: unpin_ctx_obj:
i915_gem_object_ggtt_unpin(ctx_obj); i915_gem_object_ggtt_unpin(ctx_obj);
return ret;
}
static int intel_lr_context_pin(struct drm_i915_gem_request *rq)
{
int ret = 0;
struct intel_engine_cs *ring = rq->ring;
struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
struct intel_ringbuffer *ringbuf = rq->ringbuf;
if (rq->ctx->engine[ring->id].pin_count++ == 0) {
ret = intel_lr_context_do_pin(ring, ctx_obj, ringbuf);
if (ret)
goto reset_pin_count;
}
return ret;
reset_pin_count: reset_pin_count:
rq->ctx->engine[ring->id].pin_count = 0; rq->ctx->engine[ring->id].pin_count = 0;
return ret; return ret;
} }
@ -1225,9 +1252,10 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring,
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
if (IS_BROADWELL(ring->dev)) { if (IS_BROADWELL(ring->dev)) {
index = gen8_emit_flush_coherentl3_wa(ring, batch, index); int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index);
if (index < 0) if (rc < 0)
return index; return rc;
index = rc;
} }
/* WaClearSlmSpaceAtContextSwitch:bdw,chv */ /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
@ -1450,6 +1478,9 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
lrc_setup_hardware_status_page(ring,
ring->default_context->engine[ring->id].state);
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@ -1889,7 +1920,21 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
if (ret) if (ret)
return ret; return ret;
ret = intel_lr_context_deferred_create(ring->default_context, ring); ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
if (ret)
return ret;
/* As this is the default context, always pin it */
ret = intel_lr_context_do_pin(
ring,
ring->default_context->engine[ring->id].state,
ring->default_context->engine[ring->id].ringbuf);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
return ret;
}
return ret; return ret;
} }
@ -2112,14 +2157,8 @@ int intel_logical_rings_init(struct drm_device *dev)
goto cleanup_vebox_ring; goto cleanup_vebox_ring;
} }
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
goto cleanup_bsd2_ring;
return 0; return 0;
cleanup_bsd2_ring:
intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
cleanup_vebox_ring: cleanup_vebox_ring:
intel_logical_ring_cleanup(&dev_priv->ring[VECS]); intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
cleanup_blt_ring: cleanup_blt_ring:
@ -2389,7 +2428,7 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
} }
/** /**
* intel_lr_context_deferred_create() - create the LRC specific bits of a context * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context
* @ctx: LR context to create. * @ctx: LR context to create.
* @ring: engine to be used with the context. * @ring: engine to be used with the context.
* *
@ -2401,12 +2440,11 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
* *
* Return: non-zero on error. * Return: non-zero on error.
*/ */
int intel_lr_context_deferred_create(struct intel_context *ctx,
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring) struct intel_engine_cs *ring)
{ {
const bool is_global_default_ctx = (ctx == ring->default_context);
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *ctx_obj; struct drm_i915_gem_object *ctx_obj;
uint32_t context_size; uint32_t context_size;
struct intel_ringbuffer *ringbuf; struct intel_ringbuffer *ringbuf;
@ -2426,82 +2464,49 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
return -ENOMEM; return -ENOMEM;
} }
if (is_global_default_ctx) {
ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN,
PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
if (ret) {
DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
ret);
drm_gem_object_unreference(&ctx_obj->base);
return ret;
}
/* Invalidate GuC TLB. */
if (i915.enable_guc_submission)
I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}
ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE); ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE);
if (IS_ERR(ringbuf)) { if (IS_ERR(ringbuf)) {
ret = PTR_ERR(ringbuf); ret = PTR_ERR(ringbuf);
goto error_unpin_ctx; goto error_deref_obj;
}
if (is_global_default_ctx) {
ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
if (ret) {
DRM_ERROR(
"Failed to pin and map ringbuffer %s: %d\n",
ring->name, ret);
goto error_ringbuf;
}
} }
ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
if (ret) { if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
goto error; goto error_ringbuf;
} }
ctx->engine[ring->id].ringbuf = ringbuf; ctx->engine[ring->id].ringbuf = ringbuf;
ctx->engine[ring->id].state = ctx_obj; ctx->engine[ring->id].state = ctx_obj;
if (ctx == ring->default_context) if (ctx != ring->default_context && ring->init_context) {
lrc_setup_hardware_status_page(ring, ctx_obj); struct drm_i915_gem_request *req;
else if (ring->id == RCS && !ctx->rcs_initialized) {
if (ring->init_context) {
struct drm_i915_gem_request *req;
ret = i915_gem_request_alloc(ring, ctx, &req); ret = i915_gem_request_alloc(ring,
if (ret) ctx, &req);
return ret; if (ret) {
DRM_ERROR("ring create req: %d\n",
ret = ring->init_context(req); ret);
if (ret) { goto error_ringbuf;
DRM_ERROR("ring init context: %d\n", ret);
i915_gem_request_cancel(req);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
goto error;
}
i915_add_request_no_flush(req);
} }
ctx->rcs_initialized = true; ret = ring->init_context(req);
if (ret) {
DRM_ERROR("ring init context: %d\n",
ret);
i915_gem_request_cancel(req);
goto error_ringbuf;
}
i915_add_request_no_flush(req);
} }
return 0; return 0;
error:
if (is_global_default_ctx)
intel_unpin_ringbuffer_obj(ringbuf);
error_ringbuf: error_ringbuf:
intel_ringbuffer_free(ringbuf); intel_ringbuffer_free(ringbuf);
error_unpin_ctx: error_deref_obj:
if (is_global_default_ctx)
i915_gem_object_ggtt_unpin(ctx_obj);
drm_gem_object_unreference(&ctx_obj->base); drm_gem_object_unreference(&ctx_obj->base);
ctx->engine[ring->id].ringbuf = NULL;
ctx->engine[ring->id].state = NULL;
return ret; return ret;
} }

View File

@ -28,12 +28,14 @@
/* Execlists regs */ /* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230) #define RING_ELSP(ring) ((ring)->mmio_base+0x230)
#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234) #define RING_EXECLIST_STATUS_LO(ring) ((ring)->mmio_base+0x234)
#define RING_EXECLIST_STATUS_HI(ring) ((ring)->mmio_base+0x234 + 4)
#define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244) #define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
#define CTX_CTRL_RS_CTX_ENABLE (1 << 1) #define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370) #define RING_CONTEXT_STATUS_BUF_LO(ring, i) ((ring)->mmio_base+0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i) ((ring)->mmio_base+0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0) #define RING_CONTEXT_STATUS_PTR(ring) ((ring)->mmio_base+0x3a0)
/* Logical Rings */ /* Logical Rings */
@ -75,8 +77,8 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
#define LRC_STATE_PN (LRC_PPHWSP_PN + 1) #define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
void intel_lr_context_free(struct intel_context *ctx); void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx, int intel_lr_context_deferred_alloc(struct intel_context *ctx,
struct intel_engine_cs *ring); struct intel_engine_cs *ring);
void intel_lr_context_unpin(struct drm_i915_gem_request *req); void intel_lr_context_unpin(struct drm_i915_gem_request *req);
void intel_lr_context_reset(struct drm_device *dev, void intel_lr_context_reset(struct drm_device *dev,
struct intel_context *ctx); struct intel_context *ctx);

View File

@ -821,7 +821,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
return; return;
/* /*
* Do not disable backlight on the vgaswitcheroo path. When switching * Do not disable backlight on the vga_switcheroo path. When switching
* away from i915, the other client may depend on i915 to handle the * away from i915, the other client may depend on i915 to handle the
* backlight. This will leave the backlight on unnecessarily when * backlight. This will leave the backlight on unnecessarily when
* another client is not activated. * another client is not activated.

View File

@ -134,6 +134,12 @@ static void bxt_init_clock_gating(struct drm_device *dev)
*/ */
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
} }
/* WaSetClckGatingDisableMedia:bxt */
if (INTEL_REVID(dev) == BXT_REVID_A0) {
I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
}
} }
static void i915_pineview_get_mem_freq(struct drm_device *dev) static void i915_pineview_get_mem_freq(struct drm_device *dev)
@ -3679,6 +3685,26 @@ static void skl_update_other_pipe_wm(struct drm_device *dev,
} }
} }
static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe)
{
watermarks->wm_linetime[pipe] = 0;
memset(watermarks->plane[pipe], 0,
sizeof(uint32_t) * 8 * I915_MAX_PLANES);
memset(watermarks->cursor[pipe], 0, sizeof(uint32_t) * 8);
memset(watermarks->plane_trans[pipe],
0, sizeof(uint32_t) * I915_MAX_PLANES);
watermarks->cursor_trans[pipe] = 0;
/* Clear ddb entries for pipe */
memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry));
memset(&watermarks->ddb.plane[pipe], 0,
sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
memset(&watermarks->ddb.y_plane[pipe], 0,
sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
memset(&watermarks->ddb.cursor[pipe], 0, sizeof(struct skl_ddb_entry));
}
static void skl_update_wm(struct drm_crtc *crtc) static void skl_update_wm(struct drm_crtc *crtc)
{ {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@ -3689,7 +3715,11 @@ static void skl_update_wm(struct drm_crtc *crtc)
struct skl_pipe_wm pipe_wm = {}; struct skl_pipe_wm pipe_wm = {};
struct intel_wm_config config = {}; struct intel_wm_config config = {};
memset(results, 0, sizeof(*results));
/* Clear all dirty flags */
memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
skl_clear_wm(results, intel_crtc->pipe);
skl_compute_wm_global_parameters(dev, &config); skl_compute_wm_global_parameters(dev, &config);
@ -4268,7 +4298,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT; MEMMODE_FSTART_SHIFT;
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT; PXVFREQ_PX_SHIFT;
dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
@ -4299,10 +4329,10 @@ static void ironlake_enable_drps(struct drm_device *dev)
ironlake_set_drps(dev, fstart); ironlake_set_drps(dev, fstart);
dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + dev_priv->ips.last_count1 = I915_READ(DMIEC) +
I915_READ(0x112e0); I915_READ(DDREC) + I915_READ(CSIEC);
dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
dev_priv->ips.last_count2 = I915_READ(0x112f4); dev_priv->ips.last_count2 = I915_READ(GFXEC);
dev_priv->ips.last_time2 = ktime_get_raw_ns(); dev_priv->ips.last_time2 = ktime_get_raw_ns();
spin_unlock_irq(&mchdev_lock); spin_unlock_irq(&mchdev_lock);
@ -4473,6 +4503,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0))
return;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_freq); WARN_ON(val > dev_priv->rps.max_freq);
WARN_ON(val < dev_priv->rps.min_freq); WARN_ON(val < dev_priv->rps.min_freq);
@ -4793,6 +4827,12 @@ static void gen9_enable_rps(struct drm_device *dev)
gen6_init_rps_frequencies(dev); gen6_init_rps_frequencies(dev);
/* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
if (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) {
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
return;
}
/* Program defaults and thresholds for RPS*/ /* Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RC_VIDEO_FREQ, I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
@ -4830,11 +4870,21 @@ static void gen9_enable_rc6(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, 0); I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2b: Program RC6 thresholds.*/ /* 2b: Program RC6 thresholds.*/
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
(INTEL_REVID(dev) <= SKL_REVID_E0)))
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
else
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused) for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
if (HAS_GUC_UCODE(dev))
I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
@ -4847,17 +4897,27 @@ static void gen9_enable_rc6(struct drm_device *dev)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE; rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
"on" : "off"); "on" : "off");
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_EI_MODE(1) | if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
rc6_mask); (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0))
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN7_RC_CTL_TO_MODE |
rc6_mask);
else
I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
GEN6_RC_CTL_EI_MODE(1) |
rc6_mask);
/* /*
* 3b: Enable Coarse Power Gating only when RC6 is enabled. * 3b: Enable Coarse Power Gating only when RC6 is enabled.
* WaDisableRenderPowerGating:skl,bxt - Render PG need to be disabled with RC6. * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/ */
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
GEN9_MEDIA_PG_ENABLE : 0); ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
(GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@ -5871,7 +5931,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
assert_spin_locked(&mchdev_lock); assert_spin_locked(&mchdev_lock);
pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4)); pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
pxvid = (pxvid >> 24) & 0x7f; pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid); ext_v = pvid_to_extvid(dev_priv, pxvid);
@ -6114,13 +6174,13 @@ static void intel_init_emon(struct drm_device *dev)
I915_WRITE(CSIEW2, 0x04000004); I915_WRITE(CSIEW2, 0x04000004);
for (i = 0; i < 5; i++) for (i = 0; i < 5; i++)
I915_WRITE(PEW + (i * 4), 0); I915_WRITE(PEW(i), 0);
for (i = 0; i < 3; i++) for (i = 0; i < 3; i++)
I915_WRITE(DEW + (i * 4), 0); I915_WRITE(DEW(i), 0);
/* Program P-state weights to account for frequency power adjustment */ /* Program P-state weights to account for frequency power adjustment */
for (i = 0; i < 16; i++) { for (i = 0; i < 16; i++) {
u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); u32 pxvidfreq = I915_READ(PXVFREQ(i));
unsigned long freq = intel_pxfreq(pxvidfreq); unsigned long freq = intel_pxfreq(pxvidfreq);
unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT; PXVFREQ_PX_SHIFT;
@ -6141,7 +6201,7 @@ static void intel_init_emon(struct drm_device *dev)
for (i = 0; i < 4; i++) { for (i = 0; i < 4; i++) {
u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
I915_WRITE(PXW + (i * 4), val); I915_WRITE(PXW(i), val);
} }
/* Adjust magic regs to magic values (more experimental results) */ /* Adjust magic regs to magic values (more experimental results) */
@ -6157,7 +6217,7 @@ static void intel_init_emon(struct drm_device *dev)
I915_WRITE(EG7, 0); I915_WRITE(EG7, 0);
for (i = 0; i < 8; i++) for (i = 0; i < 8; i++)
I915_WRITE(PXWL + (i * 4), 0); I915_WRITE(PXWL(i), 0);
/* Enable PMON + select events */ /* Enable PMON + select events */
I915_WRITE(ECR, 0x80000019); I915_WRITE(ECR, 0x80000019);

View File

@ -983,6 +983,16 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
if (IS_SKYLAKE(dev) ||
(IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_B0)) {
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN8_SAMPLER_POWER_BYPASS_DIS);
}
/* WaDisableSTUnitPowerOptimization:skl,bxt */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
return 0; return 0;
} }

View File

@ -463,14 +463,14 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
SKL_DISP_PW_2); SKL_DISP_PW_2);
WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n"); WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n"); WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN(pg2_enabled, "PG2 not disabled to enable DC5.\n"); WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
"DC5 already programmed to be enabled.\n"); "DC5 already programmed to be enabled.\n");
WARN(dev_priv->pm.suspended, WARN_ONCE(dev_priv->pm.suspended,
"DC5 cannot be enabled, if platform is runtime-suspended.\n"); "DC5 cannot be enabled, if platform is runtime-suspended.\n");
assert_csr_loaded(dev_priv); assert_csr_loaded(dev_priv);
} }
@ -486,8 +486,8 @@ static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
if (dev_priv->power_domains.initializing) if (dev_priv->power_domains.initializing)
return; return;
WARN(!pg2_enabled, "PG2 not enabled to disable DC5.\n"); WARN_ONCE(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
WARN(dev_priv->pm.suspended, WARN_ONCE(dev_priv->pm.suspended,
"Disabling of DC5 while platform is runtime-suspended should never happen.\n"); "Disabling of DC5 while platform is runtime-suspended should never happen.\n");
} }
@ -526,12 +526,12 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{ {
struct drm_device *dev = dev_priv->dev; struct drm_device *dev = dev_priv->dev;
WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n"); WARN_ONCE(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n"); WARN_ONCE(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Backlight is not disabled.\n"); "Backlight is not disabled.\n");
WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be enabled.\n"); "DC6 already programmed to be enabled.\n");
assert_csr_loaded(dev_priv); assert_csr_loaded(dev_priv);
} }
@ -546,8 +546,8 @@ static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
return; return;
assert_csr_loaded(dev_priv); assert_csr_loaded(dev_priv);
WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
"DC6 already programmed to be disabled.\n"); "DC6 already programmed to be disabled.\n");
} }
static void skl_enable_dc6(struct drm_i915_private *dev_priv) static void skl_enable_dc6(struct drm_i915_private *dev_priv)
@ -670,7 +670,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
wait_for((state = intel_csr_load_status_get(dev_priv)) != wait_for((state = intel_csr_load_status_get(dev_priv)) !=
FW_UNINITIALIZED, 1000); FW_UNINITIALIZED, 1000);
if (state != FW_LOADED) if (state != FW_LOADED)
DRM_ERROR("CSR firmware not ready (%d)\n", DRM_DEBUG("CSR firmware not ready (%d)\n",
state); state);
else else
if (SKL_ENABLE_DC6(dev)) if (SKL_ENABLE_DC6(dev))

View File

@ -2222,7 +2222,7 @@ intel_sdvo_guess_ddc_bus(struct intel_sdvo *sdvo)
*/ */
static void static void
intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
struct intel_sdvo *sdvo, u32 reg) struct intel_sdvo *sdvo)
{ {
struct sdvo_device_mapping *mapping; struct sdvo_device_mapping *mapping;
@ -2239,7 +2239,7 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv,
static void static void
intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
struct intel_sdvo *sdvo, u32 reg) struct intel_sdvo *sdvo)
{ {
struct sdvo_device_mapping *mapping; struct sdvo_device_mapping *mapping;
u8 pin; u8 pin;
@ -2925,7 +2925,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
intel_sdvo->sdvo_reg = sdvo_reg; intel_sdvo->sdvo_reg = sdvo_reg;
intel_sdvo->is_sdvob = is_sdvob; intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1; intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg); intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
goto err_i2c_bus; goto err_i2c_bus;
@ -2987,7 +2987,7 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
*/ */
intel_sdvo->base.cloneable = 0; intel_sdvo->base.cloneable = 0;
intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo);
/* Set the input timing to the screen. Assume always input 0. */ /* Set the input timing to the screen. Assume always input 0. */
if (!intel_sdvo_set_target_input(intel_sdvo)) if (!intel_sdvo_set_target_input(intel_sdvo))

View File

@ -95,7 +95,6 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
max = vblank_start - 1; max = vblank_start - 1;
local_irq_disable(); local_irq_disable();
crtc->start_vbl_count = 0;
if (min <= 0 || max <= 0) if (min <= 0 || max <= 0)
return; return;
@ -103,7 +102,9 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
if (WARN_ON(drm_crtc_vblank_get(&crtc->base))) if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
return; return;
trace_i915_pipe_update_start(crtc, min, max); crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
trace_i915_pipe_update_start(crtc);
for (;;) { for (;;) {
/* /*
@ -134,11 +135,12 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
drm_crtc_vblank_put(&crtc->base); drm_crtc_vblank_put(&crtc->base);
crtc->start_vbl_time = ktime_get(); crtc->debug.scanline_start = scanline;
crtc->start_vbl_count = dev->driver->get_vblank_counter(dev, pipe); crtc->debug.start_vbl_time = ktime_get();
crtc->debug.start_vbl_count =
dev->driver->get_vblank_counter(dev, pipe);
trace_i915_pipe_update_vblank_evaded(crtc, min, max, trace_i915_pipe_update_vblank_evaded(crtc);
crtc->start_vbl_count);
} }
/** /**
@ -154,17 +156,23 @@ void intel_pipe_update_end(struct intel_crtc *crtc)
{ {
struct drm_device *dev = crtc->base.dev; struct drm_device *dev = crtc->base.dev;
enum pipe pipe = crtc->pipe; enum pipe pipe = crtc->pipe;
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe);
ktime_t end_vbl_time = ktime_get(); ktime_t end_vbl_time = ktime_get();
trace_i915_pipe_update_end(crtc, end_vbl_count); trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
local_irq_enable(); local_irq_enable();
if (crtc->start_vbl_count && crtc->start_vbl_count != end_vbl_count) if (crtc->debug.start_vbl_count &&
DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us\n", crtc->debug.start_vbl_count != end_vbl_count) {
pipe_name(pipe), crtc->start_vbl_count, end_vbl_count, DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
ktime_us_delta(end_vbl_time, crtc->start_vbl_time)); pipe_name(pipe), crtc->debug.start_vbl_count,
end_vbl_count,
ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
crtc->debug.min_vbl, crtc->debug.max_vbl,
crtc->debug.scanline_start, scanline_end);
}
} }
static void static void
@ -227,12 +235,12 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
else if (key->flags & I915_SET_COLORKEY_SOURCE) else if (key->flags & I915_SET_COLORKEY_SOURCE)
plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
surf_addr = intel_plane_obj_offset(intel_plane, obj); surf_addr = intel_plane_obj_offset(intel_plane, obj, 0);
if (intel_rotation_90_or_270(rotation)) { if (intel_rotation_90_or_270(rotation)) {
/* stride: Surface height in tiles */ /* stride: Surface height in tiles */
tile_height = intel_tile_height(dev, fb->pixel_format, tile_height = intel_tile_height(dev, fb->pixel_format,
fb->modifier[0]); fb->modifier[0], 0);
stride = DIV_ROUND_UP(fb->height, tile_height); stride = DIV_ROUND_UP(fb->height, tile_height);
plane_size = (src_w << 16) | src_h; plane_size = (src_w << 16) | src_h;
x_offset = stride * tile_height - y - (src_h + 1); x_offset = stride * tile_height - y - (src_h + 1);
@ -1123,7 +1131,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
intel_plane->pipe = pipe; intel_plane->pipe = pipe;
intel_plane->plane = plane; intel_plane->plane = plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe); intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
intel_plane->check_plane = intel_check_sprite_plane; intel_plane->check_plane = intel_check_sprite_plane;
intel_plane->commit_plane = intel_commit_sprite_plane; intel_plane->commit_plane = intel_commit_sprite_plane;
possible_crtcs = (1 << pipe); possible_crtcs = (1 << pipe);

View File

@ -1138,13 +1138,13 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder)
j = 0; j = 0;
for (i = 0; i < 60; i++) for (i = 0; i < 60; i++)
I915_WRITE(TV_H_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); I915_WRITE(TV_H_LUMA(i), tv_mode->filter_table[j++]);
for (i = 0; i < 60; i++) for (i = 0; i < 60; i++)
I915_WRITE(TV_H_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); I915_WRITE(TV_H_CHROMA(i), tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++) for (i = 0; i < 43; i++)
I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]); I915_WRITE(TV_V_LUMA(i), tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++) for (i = 0; i < 43; i++)
I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]); I915_WRITE(TV_V_CHROMA(i), tv_mode->filter_table[j++]);
I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE); I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
I915_WRITE(TV_CTL, tv_ctl); I915_WRITE(TV_CTL, tv_ctl);
} }

View File

@ -27,7 +27,7 @@
#include <linux/pm_runtime.h> #include <linux/pm_runtime.h>
#define FORCEWAKE_ACK_TIMEOUT_MS 2 #define FORCEWAKE_ACK_TIMEOUT_MS 50
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__)) #define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__)) #define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))