mirror of https://gitee.com/openkylin/linux.git
Merge tag 'drm-intel-next-2018-07-19' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

On GEM side:
- GuC related fixes (Chris, Michal)
- GTT read-only pages support (Jon, Chris)
- More selftests fixes (Chris)
- More GPU reset improvements (Chris)
- Flush caches after GGTT writes (Chris)
- Handle recursive shrinker for vma->last_active allocation (Chris)
- Other execlists fixes (Chris)

On Display side:
- GLK HDMI fix (Clint)
- Rework and cleanup around HPD pin (Ville)
- Preparation work for Display Stream Compression support coming on ICL (Anusha)
- Nuke LVDS lid notification (Ville)
- Assume eDP is always connected (Ville)
- Kill intel panel detection (Ville)

Signed-off-by: Dave Airlie <airlied@redhat.com>

# gpg: Signature made Fri 20 Jul 2018 01:51:45 AM AEST
# gpg:                using RSA key FA625F640EEB13CA
# gpg: Good signature from "Rodrigo Vivi <rodrigo.vivi@intel.com>"
# gpg:                 aka "Rodrigo Vivi <rodrigo.vivi@gmail.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6D20 7068 EEDD 6509 1C2C E2A3 FA62 5F64 0EEB 13CA

# Conflicts:
#	drivers/gpu/drm/i915/intel_lrc.c

Link: https://patchwork.freedesktop.org/patch/msgid/20180719171257.GA12199@intel.com
commit ef8e0ff97a
@@ -338,6 +338,18 @@ static resource_size_t __init gen3_stolen_base(int num, int slot, int func,
    return bsm & INTEL_BSM_MASK;
}

static resource_size_t __init gen11_stolen_base(int num, int slot, int func,
                                                resource_size_t stolen_size)
{
    u64 bsm;

    bsm = read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW0);
    bsm &= INTEL_BSM_MASK;
    bsm |= (u64)read_pci_config(num, slot, func, INTEL_GEN11_BSM_DW1) << 32;

    return bsm;
}

static resource_size_t __init i830_stolen_size(int num, int slot, int func)
{
    u16 gmch_ctrl;

@@ -498,6 +510,11 @@ static const struct intel_early_ops chv_early_ops __initconst = {
    .stolen_size = chv_stolen_size,
};

static const struct intel_early_ops gen11_early_ops __initconst = {
    .stolen_base = gen11_stolen_base,
    .stolen_size = gen9_stolen_size,
};

static const struct pci_device_id intel_early_ids[] __initconst = {
    INTEL_I830_IDS(&i830_early_ops),
    INTEL_I845G_IDS(&i845_early_ops),

@@ -529,6 +546,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
    INTEL_CFL_IDS(&gen9_early_ops),
    INTEL_GLK_IDS(&gen9_early_ops),
    INTEL_CNL_IDS(&gen9_early_ops),
    INTEL_ICL_11_IDS(&gen11_early_ops),
};

struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
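The gen11 hunk above reads the Base of Stolen Memory as a 64-bit value split across two 32-bit PCI config dwords. A minimal standalone sketch of that composition, not from the kernel tree: the dword values are made up, and the INTEL_BSM_MASK definition (clear the low 20 flag bits) is stated here as an assumption.

#include <stdint.h>
#include <stdio.h>

#define INTEL_BSM_MASK (-(1u << 20))    /* assumed: keeps only address bits */

int main(void)
{
    /* Hypothetical values for the two BSM config dwords on a gen11 part. */
    uint32_t dw0 = 0x80300000u;         /* low 32 bits, flags in bits 0-19 */
    uint32_t dw1 = 0x00000001u;         /* high 32 bits */

    uint64_t bsm = dw0 & INTEL_BSM_MASK;    /* strip the flag bits */
    bsm |= (uint64_t)dw1 << 32;             /* splice in the upper dword */

    printf("stolen base: 0x%llx\n", (unsigned long long)bsm);
    return 0;
}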
@@ -1036,6 +1036,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        return -EACCES;
    }

    if (node->readonly) {
        if (vma->vm_flags & VM_WRITE) {
            drm_gem_object_put_unlocked(obj);
            return -EINVAL;
        }

        vma->vm_flags &= ~VM_MAYWRITE;
    }

    ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
                           vma);

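The drm_gem.c hunk rejects writable mappings of read-only nodes and clears VM_MAYWRITE so a read-only mapping cannot later be upgraded. A hedged userspace sketch of the observable effect; how the fake mmap offset was obtained and the exact errno values are assumptions, not taken from this diff.

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <stdio.h>

/* Sketch only: fd is an open DRM device, offset a fake mmap offset for a
 * GEM object previously marked read-only. */
static void probe_readonly_mapping(int fd, off_t offset, size_t size)
{
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, offset);
    if (p == MAP_FAILED)
        printf("writable map refused (errno %d)\n", errno);

    p = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, offset);
    if (p != MAP_FAILED &&
        mprotect(p, size, PROT_READ | PROT_WRITE) != 0)
        printf("upgrade refused, VM_MAYWRITE cleared (errno %d)\n", errno);
}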
@@ -216,16 +216,22 @@ static struct gtt_type_table_entry gtt_type_table[] = {
            GTT_TYPE_PPGTT_PDE_PT,
            GTT_TYPE_PPGTT_PTE_PT,
            GTT_TYPE_PPGTT_PTE_2M_ENTRY),
    /* We take IPS bit as 'PSE' for PTE level. */
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
            GTT_TYPE_PPGTT_PTE_4K_ENTRY,
            GTT_TYPE_PPGTT_PTE_PT,
            GTT_TYPE_INVALID,
            GTT_TYPE_INVALID),
            GTT_TYPE_PPGTT_PTE_64K_ENTRY),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
            GTT_TYPE_PPGTT_PTE_4K_ENTRY,
            GTT_TYPE_PPGTT_PTE_PT,
            GTT_TYPE_INVALID,
            GTT_TYPE_INVALID),
            GTT_TYPE_PPGTT_PTE_64K_ENTRY),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
            GTT_TYPE_PPGTT_PTE_4K_ENTRY,
            GTT_TYPE_PPGTT_PTE_PT,
            GTT_TYPE_INVALID,
            GTT_TYPE_PPGTT_PTE_64K_ENTRY),
    GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
            GTT_TYPE_PPGTT_PDE_ENTRY,
            GTT_TYPE_PPGTT_PDE_PT,

@@ -339,8 +345,14 @@ static inline int gtt_set_entry64(void *pt,

#define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
#define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)

#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */

#define GTT_64K_PTE_STRIDE 16

static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
    unsigned long pfn;
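The ADDR_*_MASK macros carve the address field out of an entry with GENMASK_ULL, and get_pfn then shifts by PAGE_SHIFT. A plain-C model of the same arithmetic; the GTT_HAW value of 46 and the entry value are assumptions for illustration.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's GENMASK_ULL(h, l). */
#define GENMASK_ULL(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define GTT_HAW 46                              /* assumed host address width */
#define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
#define PAGE_SHIFT 12

int main(void)
{
    uint64_t val64 = 0x00000000c0200083ULL;     /* made-up 2M entry */
    uint64_t pfn = (val64 & ADDR_2M_MASK) >> PAGE_SHIFT;

    printf("pfn: 0x%llx\n", (unsigned long long)pfn);
    return 0;
}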
@@ -349,6 +361,8 @@ static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
        pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
    else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
        pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
    else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
        pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
    else
        pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
    return pfn;

@@ -362,6 +376,9 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
    } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
        e->val64 &= ~ADDR_2M_MASK;
        pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
    } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
        e->val64 &= ~ADDR_64K_MASK;
        pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
    } else {
        e->val64 &= ~ADDR_4K_MASK;
        pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);

@@ -372,16 +389,41 @@ static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)

static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
    /* Entry doesn't have PSE bit. */
    if (get_pse_type(e->type) == GTT_TYPE_INVALID)
    return !!(e->val64 & _PAGE_PSE);
}

static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
{
    if (gen8_gtt_test_pse(e)) {
        switch (e->type) {
        case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
            e->val64 &= ~_PAGE_PSE;
            e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
            break;
        case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
            e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
            e->val64 &= ~_PAGE_PSE;
            break;
        default:
            WARN_ON(1);
        }
    }
}

static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
{
    if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
        return false;

    e->type = get_entry_type(e->type);
    if (!(e->val64 & _PAGE_PSE))
        return false;
    return !!(e->val64 & GEN8_PDE_IPS_64K);
}

    e->type = get_pse_type(e->type);
    return true;
static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
{
    if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
        return;

    e->val64 &= ~GEN8_PDE_IPS_64K;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)

@@ -408,6 +450,21 @@ static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
    e->val64 |= _PAGE_PRESENT;
}

static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
{
    return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
}

static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
{
    e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
}

static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
{
    e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
}

/*
 * Per-platform GMA routines.
 */

@@ -440,6 +497,12 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
    .set_present = gtt_entry_set_present,
    .test_present = gen8_gtt_test_present,
    .test_pse = gen8_gtt_test_pse,
    .clear_pse = gen8_gtt_clear_pse,
    .clear_ips = gen8_gtt_clear_ips,
    .test_ips = gen8_gtt_test_ips,
    .clear_64k_splited = gen8_gtt_clear_64k_splited,
    .set_64k_splited = gen8_gtt_set_64k_splited,
    .test_64k_splited = gen8_gtt_test_64k_splited,
    .get_pfn = gen8_gtt_get_pfn,
    .set_pfn = gen8_gtt_set_pfn,
};

@@ -453,6 +516,27 @@ static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
    .gma_to_pml4_index = gen8_gma_to_pml4_index,
};

/* Update entry type per pse and ips bit. */
static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
        struct intel_gvt_gtt_entry *entry, bool ips)
{
    switch (entry->type) {
    case GTT_TYPE_PPGTT_PDE_ENTRY:
    case GTT_TYPE_PPGTT_PDP_ENTRY:
        if (pte_ops->test_pse(entry))
            entry->type = get_pse_type(entry->type);
        break;
    case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
        if (ips)
            entry->type = get_pse_type(entry->type);
        break;
    default:
        GEM_BUG_ON(!gtt_type_is_entry(entry->type));
    }

    GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
}

/*
 * MM helpers.
 */

@@ -468,8 +552,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
    pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
                       mm->ppgtt_mm.shadow_pdps,
                       entry, index, false, 0, mm->vgpu);

    pte_ops->test_pse(entry);
    update_entry_type_for_real(pte_ops, entry, false);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,

@@ -574,7 +657,8 @@ static inline int ppgtt_spt_get_entry(
    if (ret)
        return ret;

    ops->test_pse(e);
    update_entry_type_for_real(ops, e, guest ?
            spt->guest_page.pde_ips : false);

    gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
                type, e->type, index, e->val64);

@@ -653,10 +737,12 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)

    radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);

    if (spt->guest_page.oos_page)
        detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
    if (spt->guest_page.gfn) {
        if (spt->guest_page.oos_page)
            detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

        intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
    intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
    }

    list_del_init(&spt->post_shadow_list);
    free_spt(spt);

@@ -717,8 +803,9 @@ static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(

static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

/* Allocate shadow page table without guest page. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
        struct intel_vgpu *vgpu, int type, unsigned long gfn)
        struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type)
{
    struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
    struct intel_vgpu_ppgtt_spt *spt = NULL;

@@ -753,26 +840,12 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
    spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
    spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

    /*
     * Init guest_page.
     */
    spt->guest_page.type = type;
    spt->guest_page.gfn = gfn;

    ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
            ppgtt_write_protection_handler, spt);
    ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
    if (ret)
        goto err_unmap_dma;

    ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
    if (ret)
        goto err_unreg_page_track;

    trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
    return spt;

err_unreg_page_track:
    intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
err_unmap_dma:
    dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:

@@ -780,6 +853,37 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
    return ERR_PTR(ret);
}

/* Allocate shadow page table associated with specific gfn. */
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
        struct intel_vgpu *vgpu, intel_gvt_gtt_type_t type,
        unsigned long gfn, bool guest_pde_ips)
{
    struct intel_vgpu_ppgtt_spt *spt;
    int ret;

    spt = ppgtt_alloc_spt(vgpu, type);
    if (IS_ERR(spt))
        return spt;

    /*
     * Init guest_page.
     */
    ret = intel_vgpu_register_page_track(vgpu, gfn,
            ppgtt_write_protection_handler, spt);
    if (ret) {
        ppgtt_free_spt(spt);
        return ERR_PTR(ret);
    }

    spt->guest_page.type = type;
    spt->guest_page.gfn = gfn;
    spt->guest_page.pde_ips = guest_pde_ips;

    trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);

    return spt;
}

#define pt_entry_size_shift(spt) \
    ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

@@ -787,24 +891,38 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
    (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
    for (i = 0; i < pt_entries(spt); i++) \
    for (i = 0; i < pt_entries(spt); \
         i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
        if (!ppgtt_get_guest_entry(spt, e, i) && \
            spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
    for (i = 0; i < pt_entries(spt); i++) \
    for (i = 0; i < pt_entries(spt); \
         i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
        if (!ppgtt_get_shadow_entry(spt, e, i) && \
            spt->vgpu->gvt->gtt.pte_ops->test_present(e))

static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
#define for_each_shadow_entry(spt, e, i) \
    for (i = 0; i < pt_entries(spt); \
         i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
        if (!ppgtt_get_shadow_entry(spt, e, i))

static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
    int v = atomic_read(&spt->refcount);

    trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

    atomic_inc(&spt->refcount);
}

static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
{
    int v = atomic_read(&spt->refcount);

    trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
    return atomic_dec_return(&spt->refcount);
}

static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,

@@ -843,7 +961,8 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
    pfn = ops->get_pfn(entry);
    type = spt->shadow_page.type;

    if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
    /* Uninitialized spte or unshadowed spte. */
    if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
        return;

    intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);

@@ -855,14 +974,11 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
    struct intel_gvt_gtt_entry e;
    unsigned long index;
    int ret;
    int v = atomic_read(&spt->refcount);

    trace_spt_change(spt->vgpu->id, "die", spt,
            spt->guest_page.gfn, spt->shadow_page.type);

    trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

    if (atomic_dec_return(&spt->refcount) > 0)
    if (ppgtt_put_spt(spt) > 0)
        return 0;

    for_each_present_shadow_entry(spt, &e, index) {

@@ -871,9 +987,15 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
            gvt_vdbg_mm("invalidate 4K entry\n");
            ppgtt_invalidate_pte(spt, &e);
            break;
        case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
            /* We don't setup 64K shadow entry so far. */
            WARN(1, "suspicious 64K gtt entry\n");
            continue;
        case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
            gvt_vdbg_mm("invalidate 2M entry\n");
            continue;
        case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
            WARN(1, "GVT doesn't support 2M/1GB page\n");
            WARN(1, "GVT doesn't support 1GB page\n");
            continue;
        case GTT_TYPE_PPGTT_PML4_ENTRY:
        case GTT_TYPE_PPGTT_PDP_ENTRY:

@@ -899,6 +1021,22 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
    return ret;
}

static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
    struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

    if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
        u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
            GAMW_ECO_ENABLE_64K_IPS_FIELD;

        return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
    } else if (INTEL_GEN(dev_priv) >= 11) {
        /* 64K paging only controlled by IPS bit in PTE now. */
        return true;
    } else
        return false;
}

static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(

@@ -906,35 +1044,54 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
{
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    struct intel_vgpu_ppgtt_spt *spt = NULL;
    bool ips = false;
    int ret;

    GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

    if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
        ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);

    spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
    if (spt)
    if (spt) {
        ppgtt_get_spt(spt);
    else {

        if (ips != spt->guest_page.pde_ips) {
            spt->guest_page.pde_ips = ips;

            gvt_dbg_mm("reshadow PDE since ips changed\n");
            clear_page(spt->shadow_page.vaddr);
            ret = ppgtt_populate_spt(spt);
            if (ret) {
                ppgtt_put_spt(spt);
                goto err;
            }
        }
    } else {
        int type = get_next_pt_type(we->type);

        spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we));
        spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
        if (IS_ERR(spt)) {
            ret = PTR_ERR(spt);
            goto fail;
            goto err;
        }

        ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
        if (ret)
            goto fail;
            goto err_free_spt;

        ret = ppgtt_populate_spt(spt);
        if (ret)
            goto fail;
            goto err_free_spt;

        trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
                spt->shadow_page.type);
    }
    return spt;
fail:

err_free_spt:
    ppgtt_free_spt(spt);
err:
    gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
            spt, we->val64, we->type);
    return ERR_PTR(ret);
@@ -948,16 +1105,118 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
    se->type = ge->type;
    se->val64 = ge->val64;

    /* Because we always split 64KB pages, so clear IPS in shadow PDE. */
    if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
        ops->clear_ips(se);

    ops->set_pfn(se, s->shadow_page.mfn);
}

/**
 * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions are
 * not met, negative if an error is found.
 */
static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
    struct intel_gvt_gtt_entry *entry)
{
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    unsigned long pfn;

    if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
        return 0;

    pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
    if (pfn == INTEL_GVT_INVALID_ADDR)
        return -EINVAL;

    return PageTransHuge(pfn_to_page(pfn));
}

static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
    struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
    struct intel_gvt_gtt_entry *se)
{
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    struct intel_vgpu_ppgtt_spt *sub_spt;
    struct intel_gvt_gtt_entry sub_se;
    unsigned long start_gfn;
    dma_addr_t dma_addr;
    unsigned long sub_index;
    int ret;

    gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);

    start_gfn = ops->get_pfn(se);

    sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
    if (IS_ERR(sub_spt))
        return PTR_ERR(sub_spt);

    for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
        ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
                start_gfn + sub_index, PAGE_SIZE, &dma_addr);
        if (ret) {
            ppgtt_invalidate_spt(spt);
            return ret;
        }
        sub_se.val64 = se->val64;

        /* Copy the PAT field from PDE. */
        sub_se.val64 &= ~_PAGE_PAT;
        sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;

        ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
        ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
    }

    /* Clear dirty field. */
    se->val64 &= ~_PAGE_DIRTY;

    ops->clear_pse(se);
    ops->clear_ips(se);
    ops->set_pfn(se, sub_spt->shadow_page.mfn);
    ppgtt_set_shadow_entry(spt, se, index);
    return 0;
}

static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
    struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
    struct intel_gvt_gtt_entry *se)
{
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    struct intel_gvt_gtt_entry entry = *se;
    unsigned long start_gfn;
    dma_addr_t dma_addr;
    int i, ret;

    gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);

    GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);

    start_gfn = ops->get_pfn(se);

    entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
    ops->set_64k_splited(&entry);

    for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
        ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
                start_gfn + i, PAGE_SIZE, &dma_addr);
        if (ret)
            return ret;

        ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
        ppgtt_set_shadow_entry(spt, &entry, index + i);
    }
    return 0;
}

static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
    struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
    struct intel_gvt_gtt_entry *ge)
{
    struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
    struct intel_gvt_gtt_entry se = *ge;
    unsigned long gfn;
    unsigned long gfn, page_size = PAGE_SIZE;
    dma_addr_t dma_addr;
    int ret;
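In split_2MB_gtt_entry above, the PAT selector has to move from the large-page position (bit 12) to the 4K PTE position (bit 7); the ">> 5" performs exactly that relocation. A self-contained model of the bit shuffle; the bit positions are the standard x86 ones, stated here as an assumption.

#include <stdint.h>
#include <stdio.h>

#define _PAGE_PAT       (1ULL << 7)     /* PAT bit in a 4K PTE (assumed) */
#define _PAGE_PAT_LARGE (1ULL << 12)    /* PAT bit in a 2M/1G entry (assumed) */

int main(void)
{
    uint64_t pde = _PAGE_PAT_LARGE | 0x183ULL;  /* made-up 2M PDE value */
    uint64_t pte = pde & ~_PAGE_PAT;            /* clear the 4K PAT slot */

    pte |= (pde & _PAGE_PAT_LARGE) >> 5;        /* bit 12 -> bit 7 */
    printf("4K PAT bit set: %d\n", !!(pte & _PAGE_PAT));
    return 0;
}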
@@ -970,16 +1229,33 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
    case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
        gvt_vdbg_mm("shadow 4K gtt entry\n");
        break;
    case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
        gvt_vdbg_mm("shadow 64K gtt entry\n");
        /*
         * The layout of 64K page is special, the page size is
         * controlled by upper PDE. To be simple, we always split
         * 64K page to smaller 4K pages in shadow PT.
         */
        return split_64KB_gtt_entry(vgpu, spt, index, &se);
    case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
        gvt_vdbg_mm("shadow 2M gtt entry\n");
        ret = is_2MB_gtt_possible(vgpu, ge);
        if (ret == 0)
            return split_2MB_gtt_entry(vgpu, spt, index, &se);
        else if (ret < 0)
            return ret;
        page_size = I915_GTT_PAGE_SIZE_2M;
        break;
    case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
        gvt_vgpu_err("GVT doesn't support 2M/1GB entry\n");
        gvt_vgpu_err("GVT doesn't support 1GB entry\n");
        return -EINVAL;
    default:
        GEM_BUG_ON(1);
    };

    /* direct shadow */
    ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
    ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
                                                  &dma_addr);
    if (ret)
        return -ENXIO;
@@ -1062,8 +1338,12 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
        ret = ppgtt_invalidate_spt(s);
        if (ret)
            goto fail;
    } else
    } else {
        /* We don't setup 64K shadow entry so far. */
        WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
             "suspicious 64K entry\n");
        ppgtt_invalidate_pte(spt, se);
    }

    return 0;
fail:

@@ -1286,7 +1566,7 @@ static int ppgtt_handle_guest_write_page_table(
    struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
    struct intel_gvt_gtt_entry old_se;
    int new_present;
    int ret;
    int i, ret;

    new_present = ops->test_present(we);

@@ -1308,8 +1588,27 @@ static int ppgtt_handle_guest_write_page_table(
        goto fail;

    if (!new_present) {
        ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
        ppgtt_set_shadow_entry(spt, &old_se, index);
        /* For 64KB splited entries, we need clear them all. */
        if (ops->test_64k_splited(&old_se) &&
            !(index % GTT_64K_PTE_STRIDE)) {
            gvt_vdbg_mm("remove splited 64K shadow entries\n");
            for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
                ops->clear_64k_splited(&old_se);
                ops->set_pfn(&old_se,
                    vgpu->gtt.scratch_pt[type].page_mfn);
                ppgtt_set_shadow_entry(spt, &old_se, index + i);
            }
        } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
                   old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
            ops->clear_pse(&old_se);
            ops->set_pfn(&old_se,
                         vgpu->gtt.scratch_pt[type].page_mfn);
            ppgtt_set_shadow_entry(spt, &old_se, index);
        } else {
            ops->set_pfn(&old_se,
                         vgpu->gtt.scratch_pt[type].page_mfn);
            ppgtt_set_shadow_entry(spt, &old_se, index);
        }
    }

    return 0;

@@ -1391,7 +1690,17 @@ static int ppgtt_handle_guest_write_page_table_bytes(

    ppgtt_get_guest_entry(spt, &we, index);

    ops->test_pse(&we);
    /*
     * For page table which has 64K gtt entry, only PTE#0, PTE#16,
     * PTE#32, ... PTE#496 are used. Unused PTEs update should be
     * ignored.
     */
    if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
        (index % GTT_64K_PTE_STRIDE)) {
        gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
                    index);
        return 0;
    }

    if (bytes == info->gtt_entry_size) {
        ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
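The comment above spells out the 64K layout: only every 16th slot (PTE#0, #16, ..., #496) carries a real entry, and writes to the other slots are ignored. A tiny standalone model of that stride test:

#include <stdbool.h>
#include <stdio.h>

#define GTT_64K_PTE_STRIDE 16

/* True when index addresses a slot that is meaningful for 64K entries. */
static bool pte_slot_used_for_64k(unsigned long index)
{
    return (index % GTT_64K_PTE_STRIDE) == 0;
}

int main(void)
{
    for (unsigned long i = 0; i < 48; i++)
        if (pte_slot_used_for_64k(i))
            printf("PTE#%lu is used\n", i);
    return 0;
}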
@@ -1881,7 +2190,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
        }

        ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
                                                      &dma_addr);
                                                      PAGE_SIZE, &dma_addr);
        if (ret) {
            gvt_vgpu_err("fail to populate guest ggtt entry\n");
            /* guest driver may read/write the entry when partial
@@ -63,6 +63,12 @@ struct intel_gvt_gtt_pte_ops {
    void (*clear_present)(struct intel_gvt_gtt_entry *e);
    void (*set_present)(struct intel_gvt_gtt_entry *e);
    bool (*test_pse)(struct intel_gvt_gtt_entry *e);
    void (*clear_pse)(struct intel_gvt_gtt_entry *e);
    bool (*test_ips)(struct intel_gvt_gtt_entry *e);
    void (*clear_ips)(struct intel_gvt_gtt_entry *e);
    bool (*test_64k_splited)(struct intel_gvt_gtt_entry *e);
    void (*clear_64k_splited)(struct intel_gvt_gtt_entry *e);
    void (*set_64k_splited)(struct intel_gvt_gtt_entry *e);
    void (*set_pfn)(struct intel_gvt_gtt_entry *e, unsigned long pfn);
    unsigned long (*get_pfn)(struct intel_gvt_gtt_entry *e);
};

@@ -95,6 +101,7 @@ typedef enum {
    GTT_TYPE_GGTT_PTE,

    GTT_TYPE_PPGTT_PTE_4K_ENTRY,
    GTT_TYPE_PPGTT_PTE_64K_ENTRY,
    GTT_TYPE_PPGTT_PTE_2M_ENTRY,
    GTT_TYPE_PPGTT_PTE_1G_ENTRY,

@@ -220,6 +227,7 @@ struct intel_vgpu_ppgtt_spt {

    struct {
        intel_gvt_gtt_type_t type;
        bool pde_ips; /* for 64KB PTEs */
        void *vaddr;
        struct page *page;
        unsigned long mfn;

@@ -227,6 +235,7 @@ struct intel_vgpu_ppgtt_spt {

    struct {
        intel_gvt_gtt_type_t type;
        bool pde_ips; /* for 64KB PTEs */
        unsigned long gfn;
        unsigned long write_cnt;
        struct intel_vgpu_oos_page *oos_page;
@@ -468,3 +468,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
    kfree(gvt);
    return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
MODULE_SOFTDEP("pre: kvmgt");
#endif
@@ -210,6 +210,31 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
    return 0;
}

static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
        unsigned int offset, void *p_data, unsigned int bytes)
{
    u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;

    if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
        if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
            gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
        else if (!ips)
            gvt_dbg_core("vgpu%d: ips disabled\n", vgpu->id);
        else {
            /* All engines must be enabled together for vGPU,
             * since we don't know which engine the ppgtt will
             * bind to when shadowing.
             */
            gvt_vgpu_err("Unsupported IPS setting %x, cannot enable 64K gtt.\n",
                         ips);
            return -EINVAL;
        }
    }

    write_vreg(vgpu, offset, p_data, bytes);
    return 0;
}

static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
        void *p_data, unsigned int bytes)
{
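gamw_echo_dev_rw_ia_write accepts only all-or-nothing values for the 64K IPS field, since shadowing cannot know which engine a PPGTT will bind to. A standalone model of that validation; treating the field as a 4-bit per-engine mask is an assumption for illustration.

#include <stdint.h>
#include <stdio.h>

#define GAMW_ECO_ENABLE_64K_IPS_FIELD 0xF   /* assumed per-engine bit field */

/* Valid only when IPS is enabled for all engines or for none. */
static int ips_setting_valid(uint32_t value)
{
    uint32_t ips = value & GAMW_ECO_ENABLE_64K_IPS_FIELD;

    return ips == 0 || ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
}

int main(void)
{
    printf("0x0 -> %d, 0xf -> %d, 0x3 -> %d\n",
           ips_setting_valid(0x0), ips_setting_valid(0xf),
           ips_setting_valid(0x3));
    return 0;
}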
@@ -1564,6 +1589,13 @@ static int bxt_gt_disp_pwron_write(struct intel_vgpu *vgpu,
    return 0;
}

static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
        unsigned int offset, void *p_data, unsigned int bytes)
{
    vgpu_vreg(vgpu, offset) = 0;
    return 0;
}

static int mmio_read_from_hw(struct intel_vgpu *vgpu,
        unsigned int offset, void *p_data, unsigned int bytes)
{

@@ -1774,7 +1806,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)

    MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);

    MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
    MMIO_DH(GEN8_GAMW_ECO_DEV_RW_IA, D_BDW_PLUS, NULL,
            gamw_echo_dev_rw_ia_write);

    MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
    MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
    MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);

@@ -3160,6 +3194,9 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
    MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
    MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);

    MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
    MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);

    MMIO_D(RC6_CTX_BASE, D_BXT);

    MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
@@ -53,7 +53,7 @@ struct intel_gvt_mpt {
    unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);

    int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
                              dma_addr_t *dma_addr);
                              unsigned long size, dma_addr_t *dma_addr);
    void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);

    int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
@@ -94,6 +94,7 @@ struct gvt_dma {
    struct rb_node dma_addr_node;
    gfn_t gfn;
    dma_addr_t dma_addr;
    unsigned long size;
    struct kref ref;
};

@@ -106,22 +107,83 @@ static int kvmgt_guest_init(struct mdev_device *mdev);
static void intel_vgpu_release_work(struct work_struct *work);
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);

static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
        dma_addr_t *dma_addr)
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
        unsigned long size)
{
    struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
    struct page *page;
    unsigned long pfn;
    int total_pages;
    int npage;
    int ret;

    /* Pin the page first. */
    ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1,
                         IOMMU_READ | IOMMU_WRITE, &pfn);
    if (ret != 1) {
        gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n",
                     gfn, ret);
        return -EINVAL;
    total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;

    for (npage = 0; npage < total_pages; npage++) {
        unsigned long cur_gfn = gfn + npage;

        ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
        WARN_ON(ret != 1);
    }
}

/* Pin a normal or compound guest page for dma. */
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
        unsigned long size, struct page **page)
{
    unsigned long base_pfn = 0;
    int total_pages;
    int npage;
    int ret;

    total_pages = roundup(size, PAGE_SIZE) / PAGE_SIZE;
    /*
     * We pin the pages one-by-one to avoid allocating a big array
     * on stack to hold pfns.
     */
    for (npage = 0; npage < total_pages; npage++) {
        unsigned long cur_gfn = gfn + npage;
        unsigned long pfn;

        ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
                             IOMMU_READ | IOMMU_WRITE, &pfn);
        if (ret != 1) {
            gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
                         cur_gfn, ret);
            goto err;
        }

        if (!pfn_valid(pfn)) {
            gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
            npage++;
            ret = -EFAULT;
            goto err;
        }

        if (npage == 0)
            base_pfn = pfn;
        else if (base_pfn + npage != pfn) {
            gvt_vgpu_err("The pages are not continuous\n");
            ret = -EINVAL;
            npage++;
            goto err;
        }
    }

    *page = pfn_to_page(base_pfn);
    return 0;
err:
    gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);
    return ret;
}

static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
        dma_addr_t *dma_addr, unsigned long size)
{
    struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
    struct page *page = NULL;
    int ret;

    ret = gvt_pin_guest_page(vgpu, gfn, size, &page);
    if (ret)
        return ret;

    if (!pfn_valid(pfn)) {
        gvt_vgpu_err("pfn 0x%lx is not mem backed\n", pfn);
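gvt_pin_guest_page above pins one page at a time and insists that each new pfn extends a contiguous run from base_pfn; otherwise a single huge DMA mapping would be impossible. The rule in isolation:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* True when pfns form one contiguous run starting at pfns[0], mirroring the
 * base_pfn + npage check in the pinning loop. */
static bool pfns_contiguous(const unsigned long *pfns, size_t n)
{
    for (size_t i = 1; i < n; i++)
        if (pfns[i] != pfns[0] + i)
            return false;
    return true;
}

int main(void)
{
    unsigned long ok[] = { 100, 101, 102, 103 };
    unsigned long bad[] = { 100, 101, 105 };

    printf("%d %d\n", pfns_contiguous(ok, 4), pfns_contiguous(bad, 3));
    return 0;
}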
@@ -130,27 +192,24 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
    }

    /* Setup DMA mapping. */
    page = pfn_to_page(pfn);
    *dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE,
                             PCI_DMA_BIDIRECTIONAL);
    if (dma_mapping_error(dev, *dma_addr)) {
        gvt_vgpu_err("DMA mapping failed for gfn 0x%lx\n", gfn);
        vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
        return -ENOMEM;
    *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
    ret = dma_mapping_error(dev, *dma_addr);
    if (ret) {
        gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
                     page_to_pfn(page), ret);
        gvt_unpin_guest_page(vgpu, gfn, size);
    }

    return 0;
    return ret;
}

static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
        dma_addr_t dma_addr)
        dma_addr_t dma_addr, unsigned long size)
{
    struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
    int ret;

    dma_unmap_page(dev, dma_addr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &gfn, 1);
    WARN_ON(ret != 1);
    dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
    gvt_unpin_guest_page(vgpu, gfn, size);
}

static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,

@@ -191,7 +250,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
}

static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
        dma_addr_t dma_addr)
        dma_addr_t dma_addr, unsigned long size)
{
    struct gvt_dma *new, *itr;
    struct rb_node **link, *parent = NULL;

@@ -203,6 +262,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
    new->vgpu = vgpu;
    new->gfn = gfn;
    new->dma_addr = dma_addr;
    new->size = size;
    kref_init(&new->ref);

    /* gfn_cache maps gfn to struct gvt_dma. */

@@ -260,7 +320,7 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu)
            break;
        }
        dma = rb_entry(node, struct gvt_dma, gfn_node);
        gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr);
        gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
        __gvt_cache_remove_entry(vgpu, dma);
        mutex_unlock(&vgpu->vdev.cache_lock);
    }

@@ -515,7 +575,8 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
            if (!entry)
                continue;

            gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr);
            gvt_dma_unmap_page(vgpu, entry->gfn, entry->dma_addr,
                               entry->size);
            __gvt_cache_remove_entry(vgpu, entry);
        }
        mutex_unlock(&vgpu->vdev.cache_lock);

@@ -1648,7 +1709,7 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
}

int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
        dma_addr_t *dma_addr)
        unsigned long size, dma_addr_t *dma_addr)
{
    struct kvmgt_guest_info *info;
    struct intel_vgpu *vgpu;

@@ -1665,11 +1726,11 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,

    entry = __gvt_cache_find_gfn(info->vgpu, gfn);
    if (!entry) {
        ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
        ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
        if (ret)
            goto err_unlock;

        ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
        ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
        if (ret)
            goto err_unmap;
    } else {

@@ -1681,7 +1742,7 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
    return 0;

err_unmap:
    gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
    gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
    mutex_unlock(&info->vgpu->vdev.cache_lock);
    return ret;

@@ -1691,7 +1752,8 @@ static void __gvt_dma_release(struct kref *ref)
{
    struct gvt_dma *entry = container_of(ref, typeof(*entry), ref);

    gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr);
    gvt_dma_unmap_page(entry->vgpu, entry->gfn, entry->dma_addr,
                       entry->size);
    __gvt_cache_remove_entry(entry->vgpu, entry);
}
@@ -230,17 +230,18 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
/**
 * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
 * @vgpu: a vGPU
 * @gpfn: guest pfn
 * @gfn: guest pfn
 * @size: page size
 * @dma_addr: retrieve allocated dma addr
 *
 * Returns:
 * 0 on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_dma_map_guest_page(
        struct intel_vgpu *vgpu, unsigned long gfn,
        struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,
        dma_addr_t *dma_addr)
{
    return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
    return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,
                                                  dma_addr);
}
@@ -46,6 +46,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)

    vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
    vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
    vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;

    vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
        vgpu_aperture_gmadr_base(vgpu);
@@ -2592,6 +2592,41 @@ static const struct file_operations i915_guc_log_relay_fops = {
    .release = i915_guc_log_relay_release,
};

static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
    u8 val;
    static const char * const sink_status[] = {
        "inactive",
        "transition to active, capture and display",
        "active, display from RFB",
        "active, capture and display on sink device timings",
        "transition to inactive, capture and display, timing re-sync",
        "reserved",
        "reserved",
        "sink internal error",
    };
    struct drm_connector *connector = m->private;
    struct intel_dp *intel_dp =
        enc_to_intel_dp(&intel_attached_encoder(connector)->base);

    if (connector->status != connector_status_connected)
        return -ENODEV;

    if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val) == 1) {
        const char *str = "unknown";

        val &= DP_PSR_SINK_STATE_MASK;
        if (val < ARRAY_SIZE(sink_status))
            str = sink_status[val];
        seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
    } else {
        DRM_ERROR("dpcd read (at %u) failed\n", DP_PSR_STATUS);
    }

    return 0;
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);

static void
psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
{

@@ -2643,26 +2678,6 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
    seq_printf(m, "Source PSR status: 0x%x [%s]\n", psr_status, "unknown");
}

static const char *psr_sink_status(u8 val)
{
    static const char * const sink_status[] = {
        "inactive",
        "transition to active, capture and display",
        "active, display from RFB",
        "active, capture and display on sink device timings",
        "transition to inactive, capture and display, timing re-sync",
        "reserved",
        "reserved",
        "sink internal error"
    };

    val &= DP_PSR_SINK_STATE_MASK;
    if (val < ARRAY_SIZE(sink_status))
        return sink_status[val];

    return "unknown";
}

static int i915_edp_psr_status(struct seq_file *m, void *data)
{
    struct drm_i915_private *dev_priv = node_to_i915(m->private);

@@ -2706,15 +2721,6 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
    }

    psr_source_status(dev_priv, m);

    if (dev_priv->psr.enabled) {
        struct drm_dp_aux *aux = &dev_priv->psr.enabled->aux;
        u8 val;

        if (drm_dp_dpcd_readb(aux, DP_PSR_STATUS, &val) == 1)
            seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val,
                       psr_sink_status(val));
    }
    mutex_unlock(&dev_priv->psr.lock);

    if (READ_ONCE(dev_priv->psr.debug)) {

@@ -2761,86 +2767,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
            i915_edp_psr_debug_get, i915_edp_psr_debug_set,
            "%llu\n");

static int i915_sink_crc(struct seq_file *m, void *data)
{
    struct drm_i915_private *dev_priv = node_to_i915(m->private);
    struct drm_device *dev = &dev_priv->drm;
    struct intel_connector *connector;
    struct drm_connector_list_iter conn_iter;
    struct intel_dp *intel_dp = NULL;
    struct drm_modeset_acquire_ctx ctx;
    int ret;
    u8 crc[6];

    drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);

    drm_connector_list_iter_begin(dev, &conn_iter);

    for_each_intel_connector_iter(connector, &conn_iter) {
        struct drm_crtc *crtc;
        struct drm_connector_state *state;
        struct intel_crtc_state *crtc_state;

        if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
            continue;

retry:
        ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx);
        if (ret)
            goto err;

        state = connector->base.state;
        if (!state->best_encoder)
            continue;

        crtc = state->crtc;
        ret = drm_modeset_lock(&crtc->mutex, &ctx);
        if (ret)
            goto err;

        crtc_state = to_intel_crtc_state(crtc->state);
        if (!crtc_state->base.active)
            continue;

        /*
         * We need to wait for all crtc updates to complete, to make
         * sure any pending modesets and plane updates are completed.
         */
        if (crtc_state->base.commit) {
            ret = wait_for_completion_interruptible(&crtc_state->base.commit->hw_done);

            if (ret)
                goto err;
        }

        intel_dp = enc_to_intel_dp(state->best_encoder);

        ret = intel_dp_sink_crc(intel_dp, crtc_state, crc);
        if (ret)
            goto err;

        seq_printf(m, "%02x%02x%02x%02x%02x%02x\n",
                   crc[0], crc[1], crc[2],
                   crc[3], crc[4], crc[5]);
        goto out;

err:
        if (ret == -EDEADLK) {
            ret = drm_modeset_backoff(&ctx);
            if (!ret)
                goto retry;
        }
        goto out;
    }
    ret = -ENODEV;
out:
    drm_connector_list_iter_end(&conn_iter);
    drm_modeset_drop_locks(&ctx);
    drm_modeset_acquire_fini(&ctx);

    return ret;
}

static int i915_energy_uJ(struct seq_file *m, void *data)
{
    struct drm_i915_private *dev_priv = node_to_i915(m->private);

@@ -4786,7 +4712,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
    {"i915_ppgtt_info", i915_ppgtt_info, 0},
    {"i915_llc", i915_llc, 0},
    {"i915_edp_psr_status", i915_edp_psr_status, 0},
    {"i915_sink_crc_eDP1", i915_sink_crc, 0},
    {"i915_energy_uJ", i915_energy_uJ, 0},
    {"i915_runtime_pm_status", i915_runtime_pm_status, 0},
    {"i915_power_domain_info", i915_power_domain_info, 0},

@@ -4968,9 +4893,12 @@ int i915_debugfs_connector_add(struct drm_connector *connector)
    debugfs_create_file("i915_dpcd", S_IRUGO, root,
                        connector, &i915_dpcd_fops);

    if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
    if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
        debugfs_create_file("i915_panel_timings", S_IRUGO, root,
                            connector, &i915_panel_fops);
        debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
                            connector, &i915_psr_sink_status_fops);
    }

    return 0;
}
@@ -104,8 +104,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
    vaf.fmt = fmt;
    vaf.va = &args;

    dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
               __builtin_return_address(0), &vaf);
    if (is_error)
        dev_printk(level, kdev, "%pV", &vaf);
    else
        dev_printk(level, kdev, "[" DRM_NAME ":%ps] %pV",
                   __builtin_return_address(0), &vaf);

    va_end(args);

    if (is_error && !shown_bug_once) {
        /*

@@ -117,8 +122,6 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
        dev_notice(kdev, "%s", FDO_BUG_MSG);
        shown_bug_once = true;
    }

    va_end(args);
}

/* Map PCH device id to PCH type, or PCH_NONE if unknown. */

@@ -679,7 +682,7 @@ static int i915_load_modeset_init(struct drm_device *dev)

    ret = i915_gem_init(dev_priv);
    if (ret)
        goto cleanup_irq;
        goto cleanup_modeset;

    intel_setup_overlay(dev_priv);

@@ -699,6 +702,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
    if (i915_gem_suspend(dev_priv))
        DRM_ERROR("failed to idle hardware; continuing to unload!\n");
    i915_gem_fini(dev_priv);
cleanup_modeset:
    intel_modeset_cleanup(dev);
cleanup_irq:
    drm_irq_uninstall(dev);
    intel_teardown_gmbus(dev_priv);

@@ -895,7 +900,6 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
    spin_lock_init(&dev_priv->uncore.lock);

    mutex_init(&dev_priv->sb_lock);
    mutex_init(&dev_priv->modeset_restore_lock);
    mutex_init(&dev_priv->av_mutex);
    mutex_init(&dev_priv->wm.wm_mutex);
    mutex_init(&dev_priv->pps_mutex);

@@ -1149,8 +1153,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)

    intel_uncore_sanitize(dev_priv);

    intel_opregion_setup(dev_priv);

    i915_gem_load_init_fences(dev_priv);

    /* On the 945G/GM, the chipset reports the MSI capability on the

@@ -1179,10 +1181,16 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)

    ret = intel_gvt_init(dev_priv);
    if (ret)
        goto err_ggtt;
        goto err_msi;

    intel_opregion_setup(dev_priv);

    return 0;

err_msi:
    if (pdev->msi_enabled)
        pci_disable_msi(pdev);
    pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
    i915_ggtt_cleanup_hw(dev_priv);
err_perf:

@@ -1415,6 +1423,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
    drm_dev_fini(&dev_priv->drm);
out_free:
    kfree(dev_priv);
    pci_set_drvdata(pdev, NULL);
    return ret;
}

@@ -1560,11 +1569,6 @@ static int i915_drm_suspend(struct drm_device *dev)
    struct pci_dev *pdev = dev_priv->drm.pdev;
    pci_power_t opregion_target_state;

    /* ignore lid events during suspend */
    mutex_lock(&dev_priv->modeset_restore_lock);
    dev_priv->modeset_restore = MODESET_SUSPENDED;
    mutex_unlock(&dev_priv->modeset_restore_lock);

    disable_rpm_wakeref_asserts(dev_priv);

    /* We do a lot of poking in a lot of registers, make sure they work

@@ -1577,7 +1581,7 @@ static int i915_drm_suspend(struct drm_device *dev)

    intel_display_suspend(dev);

    intel_dp_mst_suspend(dev);
    intel_dp_mst_suspend(dev_priv);

    intel_runtime_pm_disable_interrupts(dev_priv);
    intel_hpd_cancel_work(dev_priv);

@@ -1742,7 +1746,7 @@ static int i915_drm_resume(struct drm_device *dev)
    dev_priv->display.hpd_irq_setup(dev_priv);
    spin_unlock_irq(&dev_priv->irq_lock);

    intel_dp_mst_resume(dev);
    intel_dp_mst_resume(dev_priv);

    intel_display_resume(dev);

@@ -1760,10 +1764,6 @@ static int i915_drm_resume(struct drm_device *dev)

    intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

    mutex_lock(&dev_priv->modeset_restore_lock);
    dev_priv->modeset_restore = MODESET_DONE;
    mutex_unlock(&dev_priv->modeset_restore_lock);

    intel_opregion_notify_adapter(dev_priv, PCI_D0);

    enable_rpm_wakeref_asserts(dev_priv);
@@ -86,8 +86,8 @@

#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20180709"
#define DRIVER_TIMESTAMP 1531175967
#define DRIVER_DATE "20180719"
#define DRIVER_TIMESTAMP 1532015279

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions

@@ -299,7 +299,6 @@ struct i915_hotplug {
    u32 event_bits;
    struct delayed_work reenable_work;

    struct intel_digital_port *irq_port[I915_MAX_PORTS];
    u32 long_port_mask;
    u32 short_port_mask;
    struct work_struct dig_port_work;

@@ -650,6 +649,7 @@ enum intel_sbi_destination {
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)

struct intel_fbdev;
struct intel_fbc_work;

@@ -1002,12 +1002,6 @@ struct i915_gem_mm {

#define I915_ENGINE_WEDGED_TIMEOUT (60 * HZ) /* Reset but no recovery? */

enum modeset_restore {
    MODESET_ON_LID_OPEN,
    MODESET_DONE,
    MODESET_SUSPENDED,
};

#define DP_AUX_A 0x40
#define DP_AUX_B 0x10
#define DP_AUX_C 0x20

@@ -1730,8 +1724,6 @@ struct drm_i915_private {

    unsigned long quirks;

    enum modeset_restore modeset_restore;
    struct mutex modeset_restore_lock;
    struct drm_atomic_state *modeset_restore_state;
    struct drm_modeset_acquire_ctx reset_ctx;

@@ -2557,6 +2549,9 @@ intel_info(const struct drm_i915_private *dev_priv)
                 IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))

#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
                                        IS_GEMINILAKE(dev_priv) || \
                                        IS_KABYLAKE(dev_priv))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.

@@ -2739,8 +2734,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
                                enum hpd_pin pin);
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
                                   enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);

@@ -3304,7 +3297,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *i915,
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);

void i915_gem_shrinker_taints_mutex(struct mutex *mutex);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
@@ -802,7 +802,7 @@ void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
	 * that was!).
	 */

 	wmb();
+	i915_gem_chipset_flush(dev_priv);

 	intel_runtime_pm_get(dev_priv);
 	spin_lock_irq(&dev_priv->uncore.lock);

@@ -1627,6 +1627,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		goto err;
 	}

+	/* Writes not allowed into this read-only object */
+	if (i915_gem_object_is_readonly(obj)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
 	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

 	ret = -ENODEV;

@@ -2012,6 +2018,10 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	pgoff_t page_offset;
 	int ret;

+	/* Sanity check that we allow writing into this object */
+	if (i915_gem_object_is_readonly(obj) && write)
+		return VM_FAULT_SIGBUS;
+
 	/* We don't use vmf->pgoff since that has the fake offset */
 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
@@ -5029,32 +5039,32 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	mutex_unlock(&i915->drm.struct_mutex);
 }

-int i915_gem_suspend(struct drm_i915_private *dev_priv)
+int i915_gem_suspend(struct drm_i915_private *i915)
 {
-	struct drm_device *dev = &dev_priv->drm;
 	int ret;

 	GEM_TRACE("\n");

-	intel_runtime_pm_get(dev_priv);
-	intel_suspend_gt_powersave(dev_priv);
+	intel_runtime_pm_get(i915);
+	intel_suspend_gt_powersave(i915);

-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&i915->drm.struct_mutex);

-	/* We have to flush all the executing contexts to main memory so
+	/*
+	 * We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
-	 * leaves the dev_priv->kernel_context still active when
+	 * leaves the i915->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
-	if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
-		ret = i915_gem_switch_to_kernel_context(dev_priv);
+	if (!i915_terminally_wedged(&i915->gpu_error)) {
+		ret = i915_gem_switch_to_kernel_context(i915);
 		if (ret)
 			goto err_unlock;

-		ret = i915_gem_wait_for_idle(dev_priv,
+		ret = i915_gem_wait_for_idle(i915,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED |
					     I915_WAIT_FOR_IDLE_BOOST,
@@ -5062,33 +5072,37 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 		if (ret && ret != -EIO)
 			goto err_unlock;

-		assert_kernel_context_is_current(dev_priv);
+		assert_kernel_context_is_current(i915);
 	}
-	mutex_unlock(&dev->struct_mutex);
+	i915_retire_requests(i915); /* ensure we flush after wedging */
+	mutex_unlock(&i915->drm.struct_mutex);

-	intel_uc_suspend(dev_priv);
+	intel_uc_suspend(i915);

-	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+	cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
+	cancel_delayed_work_sync(&i915->gt.retire_work);

-	/* As the idle_work is rearming if it detects a race, play safe and
+	/*
+	 * As the idle_work is rearming if it detects a race, play safe and
	 * repeat the flush until it is definitely idle.
	 */
-	drain_delayed_work(&dev_priv->gt.idle_work);
+	drain_delayed_work(&i915->gt.idle_work);

-	/* Assert that we sucessfully flushed all the work and
+	/*
+	 * Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
-	WARN_ON(dev_priv->gt.awake);
-	if (WARN_ON(!intel_engines_are_idle(dev_priv)))
-		i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
+	WARN_ON(i915->gt.awake);
+	if (WARN_ON(!intel_engines_are_idle(i915)))
+		i915_gem_set_wedged(i915); /* no hope, discard everything */

-	intel_runtime_pm_put(dev_priv);
+	intel_runtime_pm_put(i915);
 	return 0;

 err_unlock:
-	mutex_unlock(&dev->struct_mutex);
-	intel_runtime_pm_put(dev_priv);
+	mutex_unlock(&i915->drm.struct_mutex);
+	intel_runtime_pm_put(i915);
 	return ret;
 }

@@ -5311,13 +5325,17 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
 	ret = __i915_gem_restart_engines(dev_priv);
 	if (ret)
 		goto cleanup_uc;
-out:
+
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-	return ret;
+
+	return 0;

 cleanup_uc:
 	intel_uc_fini_hw(dev_priv);
-	goto out;
+out:
+	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
+	return ret;
 }

 static int __intel_engines_record_defaults(struct drm_i915_private *i915)

@@ -5548,6 +5566,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	WARN_ON(i915_gem_suspend(dev_priv));
 	i915_gem_suspend_late(dev_priv);

+	i915_gem_drain_workqueue(dev_priv);
+
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	intel_uc_fini_hw(dev_priv);
 err_uc_init:
@@ -204,9 +204,9 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 			return err;
 	}

-	/* Currently applicable only to VLV */
+	/* Applicable to VLV, and gen8+ */
 	pte_flags = 0;
-	if (vma->obj->gt_ro)
+	if (i915_gem_object_is_readonly(vma->obj))
 		pte_flags |= PTE_READ_ONLY;

 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

@@ -244,10 +244,13 @@ static void clear_pages(struct i915_vma *vma)
 }

 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-				  enum i915_cache_level level)
+				  enum i915_cache_level level,
+				  u32 flags)
 {
-	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
-	pte |= addr;
+	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+	if (unlikely(flags & PTE_READ_ONLY))
+		pte &= ~_PAGE_RW;

 	switch (level) {
 	case I915_CACHE_NONE:
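The gen8_pte_encode() change above threads a flags word through so that a read-only binding can drop the RW bit from an otherwise-writable PTE. A minimal, self-contained sketch of that encoding idea follows; the bit positions and names here are invented for illustration, not the hardware layout:

#include <stdint.h>

#define EX_PTE_PRESENT   (1ull << 0)
#define EX_PTE_RW        (1ull << 1)
#define EX_READ_ONLY     (1u << 0)	/* caller-supplied flag */

static uint64_t example_pte_encode(uint64_t addr, uint32_t flags)
{
	/* start writable by default, as the driver code above does */
	uint64_t pte = addr | EX_PTE_PRESENT | EX_PTE_RW;

	if (flags & EX_READ_ONLY)
		pte &= ~EX_PTE_RW;	/* demote to read-only */

	return pte;
}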
@@ -531,6 +534,14 @@ static void vm_free_page(struct i915_address_space *vm, struct page *page)
 static void i915_address_space_init(struct i915_address_space *vm,
				    struct drm_i915_private *dev_priv)
 {
+	/*
+	 * The vm->mutex must be reclaim safe (for use in the shrinker).
+	 * Do a dummy acquire now under fs_reclaim so that any allocation
+	 * attempt holding the lock is immediately reported by lockdep.
+	 */
+	mutex_init(&vm->mutex);
+	i915_gem_shrinker_taints_mutex(&vm->mutex);
+
 	GEM_BUG_ON(!vm->total);
 	drm_mm_init(&vm->mm, 0, vm->total);
 	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

@@ -551,6 +562,8 @@ static void i915_address_space_fini(struct i915_address_space *vm)
 	spin_unlock(&vm->free_pages.lock);

 	drm_mm_takedown(&vm->mm);
+
+	mutex_destroy(&vm->mutex);
 }

 static int __setup_page_dma(struct i915_address_space *vm,

@@ -711,7 +724,7 @@ static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
 {
 	fill_px(vm, pt,
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
+		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
 }

 static void gen6_initialize_pt(struct gen6_hw_ppgtt *ppgtt,

@@ -859,7 +872,7 @@ static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
 	unsigned int pte = gen8_pte_index(start);
 	unsigned int pte_end = pte + num_entries;
 	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
 	gen8_pte_t *vaddr;

 	GEM_BUG_ON(num_entries > pt->used_ptes);

@@ -1031,10 +1044,11 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
			      struct i915_page_directory_pointer *pdp,
			      struct sgt_dma *iter,
			      struct gen8_insert_pte *idx,
-			      enum i915_cache_level cache_level)
+			      enum i915_cache_level cache_level,
+			      u32 flags)
 {
 	struct i915_page_directory *pd;
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
 	gen8_pte_t *vaddr;
 	bool ret;

@@ -1085,14 +1099,14 @@ gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
				   struct i915_vma *vma,
				   enum i915_cache_level cache_level,
-				   u32 unused)
+				   u32 flags)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct sgt_dma iter = sgt_dma(vma);
 	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

 	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
-				      cache_level);
+				      cache_level, flags);

 	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 }

@@ -1100,9 +1114,10 @@ static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
					   struct i915_page_directory_pointer **pdps,
					   struct sgt_dma *iter,
-					   enum i915_cache_level cache_level)
+					   enum i915_cache_level cache_level,
+					   u32 flags)
 {
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
 	u64 start = vma->node.start;
 	dma_addr_t rem = iter->sg->length;

@@ -1218,19 +1233,21 @@ static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
				   struct i915_vma *vma,
				   enum i915_cache_level cache_level,
-				   u32 unused)
+				   u32 flags)
 {
 	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
 	struct sgt_dma iter = sgt_dma(vma);
 	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;

 	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
-		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
+		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
+					       flags);
 	} else {
 		struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

 		while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
-						     &iter, &idx, cache_level))
+						     &iter, &idx, cache_level,
+						     flags))
 			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);

 		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;

@@ -1568,7 +1585,7 @@ static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
 	struct i915_address_space *vm = &ppgtt->vm;
 	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
 	u64 start = 0, length = ppgtt->vm.total;

 	if (use_4lvl(vm)) {

@@ -1645,6 +1662,13 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
		1ULL << 48 :
		1ULL << 32;

+	/*
+	 * From bdw, there is support for read-only pages in the PPGTT.
+	 *
+	 * XXX GVT is not honouring the lack of RW in the PTE bits.
+	 */
+	ppgtt->vm.has_read_only = !intel_vgpu_active(i915);
+
 	i915_address_space_init(&ppgtt->vm, i915);

 	/* There are only few exceptions for gen >=6. chv and bxt.
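has_read_only is computed once at address-space creation (gen8 PPGTT supports it unless a vGPU is active) and then consulted wherever a read-only binding is requested. A hedged sketch of that capability-flag pattern, with invented types:

#include <stdbool.h>
#include <stdint.h>

struct ex_vm {
	bool has_read_only;	/* set once when the VM is created */
};

static uint32_t ex_bind_pte_flags(const struct ex_vm *vm, bool want_ro)
{
	/* only request a read-only PTE when the VM actually supports it */
	return (want_ro && vm->has_read_only) ? 1u /* EX_READ_ONLY */ : 0u;
}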
@@ -2451,7 +2475,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

-	gen8_set_pte(pte, gen8_pte_encode(addr, level));
+	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));

 	ggtt->invalidate(vm->i915);
 }

@@ -2459,14 +2483,19 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
-				     u32 unused)
+				     u32 flags)
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	struct sgt_iter sgt_iter;
 	gen8_pte_t __iomem *gtt_entries;
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
 	dma_addr_t addr;

+	/*
+	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+	 * not to allow the user to override access to a read only page.
+	 */
+
 	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
 	gtt_entries += vma->node.start >> PAGE_SHIFT;
 	for_each_sgt_dma(addr, sgt_iter, vma->pages)

@@ -2532,7 +2561,7 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
 	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
 	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;

@@ -2593,13 +2622,14 @@ struct insert_entries {
 	struct i915_address_space *vm;
 	struct i915_vma *vma;
 	enum i915_cache_level level;
+	u32 flags;
 };

 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 {
 	struct insert_entries *arg = _arg;

-	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
+	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
 	bxt_vtd_ggtt_wa(arg->vm);

 	return 0;

@@ -2608,9 +2638,9 @@ static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
-					     u32 unused)
+					     u32 flags)
 {
-	struct insert_entries arg = { vm, vma, level };
+	struct insert_entries arg = { vm, vma, level, flags };

 	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
 }
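bxt_vtd_ggtt_insert_entries__BKL() above packs its arguments into a struct and funnels the real insert through a callback run under stop_machine(), so the PTE writes happen with every other CPU quiesced. A toy model of that marshal-and-dispatch shape, with the quiescing stubbed out and all names invented:

struct ex_insert_args {
	int from, to, value;
};

static int ex_table[16];

static int ex_insert_cb(void *data)
{
	struct ex_insert_args *args = data;

	/* in the real code, this body runs with all other CPUs held off */
	for (int i = args->from; i < args->to && i < 16; i++)
		ex_table[i] = args->value;
	return 0;
}

static void ex_insert_serialised(int from, int to, int value)
{
	struct ex_insert_args args = { from, to, value };

	/* stand-in for stop_machine(ex_insert_cb, &args, NULL) */
	ex_insert_cb(&args);
}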
@@ -2701,9 +2731,9 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	struct drm_i915_gem_object *obj = vma->obj;
 	u32 pte_flags;

-	/* Currently applicable only to VLV */
+	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
 	pte_flags = 0;
-	if (obj->gt_ro)
+	if (i915_gem_object_is_readonly(obj))
 		pte_flags |= PTE_READ_ONLY;

 	intel_runtime_pm_get(i915);

@@ -2741,7 +2771,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,

 	/* Currently applicable only to VLV */
 	pte_flags = 0;
-	if (vma->obj->gt_ro)
+	if (i915_gem_object_is_readonly(vma->obj))
 		pte_flags |= PTE_READ_ONLY;

 	if (flags & I915_VMA_LOCAL_BIND) {

@@ -3581,6 +3611,10 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
	 */
 	mutex_lock(&dev_priv->drm.struct_mutex);
 	i915_address_space_init(&ggtt->vm, dev_priv);
+
+	/* Only VLV supports read-only GGTT mappings */
+	ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
+
 	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
 	mutex_unlock(&dev_priv->drm.struct_mutex);

@@ -293,6 +293,8 @@ struct i915_address_space {

 	bool closed;

+	struct mutex mutex; /* protects vma and our lists */
+
 	struct i915_page_dma scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;

@@ -329,7 +331,12 @@ struct i915_address_space {
 	struct list_head unbound_list;

 	struct pagestash free_pages;
-	bool pt_kmap_wc;
+
+	/* Some systems require uncached updates of the page directories */
+	bool pt_kmap_wc:1;
+
+	/* Some systems support read-only mappings for GGTT and/or PPGTT */
+	bool has_read_only:1;

 	/* FIXME: Need a more generic return type */
 	gen6_pte_t (*pte_encode)(dma_addr_t addr,

@@ -141,7 +141,6 @@ struct drm_i915_gem_object {
	 * Is the object to be mapped as read-only to the GPU
	 * Only honoured if hardware has relevant pte bit
	 */
-	unsigned long gt_ro:1;
 	unsigned int cache_level:3;
 	unsigned int cache_coherent:2;
 #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)

@@ -268,7 +267,6 @@ struct drm_i915_gem_object {
 	union {
 		struct i915_gem_userptr {
 			uintptr_t ptr;
-			unsigned read_only :1;

 			struct i915_mm_struct *mm;
 			struct i915_mmu_object *mmu_object;

@@ -358,6 +356,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 	reservation_object_unlock(obj->resv);
 }

+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+	obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+	return obj->base.vma_node.readonly;
+}
+
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
@@ -23,6 +23,7 @@
  */

 #include <linux/oom.h>
+#include <linux/sched/mm.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>

@@ -531,3 +532,14 @@ void i915_gem_shrinker_unregister(struct drm_i915_private *i915)
 	WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
 	unregister_shrinker(&i915->mm.shrinker);
 }
+
+void i915_gem_shrinker_taints_mutex(struct mutex *mutex)
+{
+	if (!IS_ENABLED(CONFIG_LOCKDEP))
+		return;
+
+	fs_reclaim_acquire(GFP_KERNEL);
+	mutex_lock(mutex);
+	mutex_unlock(mutex);
+	fs_reclaim_release(GFP_KERNEL);
+}
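i915_gem_shrinker_taints_mutex() primes lockdep: it records the "reclaim, then this mutex" ordering once at init, so any later allocation made while holding the mutex is reported immediately rather than only when a real shrink races in. A user-space toy of the same record-the-edge-up-front idea (lockdep does this for real; every name here is invented):

#include <stdbool.h>
#include <stdio.h>

static bool reclaim_then_mutex_seen;	/* the recorded ordering */
static bool mutex_held;

static void ex_taint_mutex(void)
{
	/* dummy acquire: pretend the mutex was once taken inside reclaim */
	reclaim_then_mutex_seen = true;
}

static void ex_alloc(void)
{
	/* an allocation may recurse into reclaim, so flag the inversion */
	if (mutex_held && reclaim_then_mutex_seen)
		fprintf(stderr, "possible deadlock: allocating under a reclaim-tainted mutex\n");
}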
@@ -344,6 +344,35 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
 	*size = stolen_top - *base;
 }

+static void icl_get_stolen_reserved(struct drm_i915_private *dev_priv,
+				    resource_size_t *base,
+				    resource_size_t *size)
+{
+	u64 reg_val = I915_READ64(GEN6_STOLEN_RESERVED);
+
+	DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+
+	*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
+
+	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
+	case GEN8_STOLEN_RESERVED_1M:
+		*size = 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_2M:
+		*size = 2 * 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_4M:
+		*size = 4 * 1024 * 1024;
+		break;
+	case GEN8_STOLEN_RESERVED_8M:
+		*size = 8 * 1024 * 1024;
+		break;
+	default:
+		*size = 8 * 1024 * 1024;
+		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
+	}
+}
+
 int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 {
 	resource_size_t reserved_base, stolen_top;
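A worked reading of the size decode above: the GEN8_STOLEN_RESERVED_*M values are a two-bit field at shift 7 (the 4M and 8M definitions, (2 << 7) and (3 << 7), are visible later in this series; 1M and 2M are assumed to be 0 and 1 at the same shift). That makes the four cases equivalent to 1 MiB shifted left by the field value. The shortcut below is an observation about the encoding, not what the driver does:

#include <stdint.h>

static uint64_t ex_reserved_size(uint64_t reg_val)
{
	unsigned int field = (reg_val >> 7) & 3;	/* 0..3 */

	return (1ull << 20) << field;	/* 1M, 2M, 4M, 8M */
}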
@@ -400,7 +429,9 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 		gen7_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
 		break;
-	default:
+	case 8:
+	case 9:
+	case 10:
 		if (IS_LP(dev_priv))
 			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);

@@ -408,6 +439,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
 			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
 		break;
+	case 11:
+	default:
+		icl_get_stolen_reserved(dev_priv, &reserved_base,
+					&reserved_size);
+		break;
 	}

 	/*
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 		struct mm_struct *mm = obj->userptr.mm->mm;
 		unsigned int flags = 0;

-		if (!obj->userptr.read_only)
+		if (!i915_gem_object_is_readonly(obj))
 			flags |= FOLL_WRITE;

 		ret = -EFAULT;

@@ -643,7 +643,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 		if (pvec) /* defer to worker if malloc fails */
 			pinned = __get_user_pages_fast(obj->userptr.ptr,
						       num_pages,
-						       !obj->userptr.read_only,
+						       !i915_gem_object_is_readonly(obj),
						       pvec);
 	}

@@ -789,10 +789,15 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 		return -EFAULT;

 	if (args->flags & I915_USERPTR_READ_ONLY) {
-		/* On almost all of the current hw, we cannot tell the GPU that a
-		 * page is readonly, so this is just a placeholder in the uAPI.
+		struct i915_hw_ppgtt *ppgtt;
+
+		/*
+		 * On almost all of the older hw, we cannot tell the GPU that
+		 * a page is readonly.
		 */
-		return -ENODEV;
+		ppgtt = dev_priv->kernel_context->ppgtt;
+		if (!ppgtt || !ppgtt->vm.has_read_only)
+			return -ENODEV;
 	}

 	obj = i915_gem_object_alloc(dev_priv);
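The ioctl above now admits I915_USERPTR_READ_ONLY only when the kernel context's PPGTT advertises read-only support, instead of rejecting the flag outright. A minimal sketch of that uAPI gate; the types, flag value, and error constant are stand-ins:

#include <stdbool.h>

#define EX_USERPTR_READ_ONLY (1u << 0)
#define EX_ENODEV 19

struct ex_ppgtt {
	bool has_read_only;
};

static int ex_userptr_validate(const struct ex_ppgtt *ppgtt, unsigned int flags)
{
	if (flags & EX_USERPTR_READ_ONLY) {
		/* refuse read-only userptr unless the hw can enforce it */
		if (!ppgtt || !ppgtt->has_read_only)
			return -EX_ENODEV;
	}
	return 0;
}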
@@ -806,7 +811,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

 	obj->userptr.ptr = args->user_ptr;
-	obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);
+	if (args->flags & I915_USERPTR_READ_ONLY)
+		i915_gem_object_set_readonly(obj);

 	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier

@@ -263,9 +263,9 @@ static u32
 gen11_gt_engine_identity(struct drm_i915_private * const i915,
			 const unsigned int bank, const unsigned int bit);

-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
-			 const unsigned int bank,
-			 const unsigned int bit)
+static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
+				const unsigned int bank,
+				const unsigned int bit)
 {
 	void __iomem * const regs = i915->regs;
 	u32 dw;

@@ -1576,122 +1576,122 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
 	}
 }

-static bool gen11_port_hotplug_long_detect(enum port port, u32 val)
+static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-	switch (port) {
-	case PORT_C:
+	switch (pin) {
+	case HPD_PORT_C:
 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
-	case PORT_D:
+	case HPD_PORT_D:
 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
-	case PORT_E:
+	case HPD_PORT_E:
 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
-	case PORT_F:
+	case HPD_PORT_F:
 		return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
 	default:
 		return false;
 	}
 }

-static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
+static bool bxt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-	switch (port) {
-	case PORT_A:
+	switch (pin) {
+	case HPD_PORT_A:
 		return val & PORTA_HOTPLUG_LONG_DETECT;
-	case PORT_B:
+	case HPD_PORT_B:
 		return val & PORTB_HOTPLUG_LONG_DETECT;
-	case PORT_C:
+	case HPD_PORT_C:
 		return val & PORTC_HOTPLUG_LONG_DETECT;
 	default:
 		return false;
 	}
 }

-static bool icp_ddi_port_hotplug_long_detect(enum port port, u32 val)
+static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-	switch (port) {
-	case PORT_A:
+	switch (pin) {
+	case HPD_PORT_A:
 		return val & ICP_DDIA_HPD_LONG_DETECT;
-	case PORT_B:
+	case HPD_PORT_B:
 		return val & ICP_DDIB_HPD_LONG_DETECT;
 	default:
 		return false;
 	}
 }

-static bool icp_tc_port_hotplug_long_detect(enum port port, u32 val)
+static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-	switch (port) {
-	case PORT_C:
+	switch (pin) {
+	case HPD_PORT_C:
 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
-	case PORT_D:
+	case HPD_PORT_D:
 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
-	case PORT_E:
+	case HPD_PORT_E:
 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
-	case PORT_F:
+	case HPD_PORT_F:
 		return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
 	default:
 		return false;
 	}
 }

-static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
+static bool spt_port_hotplug2_long_detect(enum hpd_pin pin, u32 val)
 {
-	switch (port) {
-	case PORT_E:
+	switch (pin) {
+	case HPD_PORT_E:
 		return val & PORTE_HOTPLUG_LONG_DETECT;
 	default:
 		return false;
 	}
 }

-static bool spt_port_hotplug_long_detect(enum port port, u32 val)
+static bool spt_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-	switch (port) {
-	case PORT_A:
+	switch (pin) {
+	case HPD_PORT_A:
 		return val & PORTA_HOTPLUG_LONG_DETECT;
-	case PORT_B:
+	case HPD_PORT_B:
 		return val & PORTB_HOTPLUG_LONG_DETECT;
-	case PORT_C:
+	case HPD_PORT_C:
 		return val & PORTC_HOTPLUG_LONG_DETECT;
-	case PORT_D:
+	case HPD_PORT_D:
 		return val & PORTD_HOTPLUG_LONG_DETECT;
 	default:
 		return false;
 	}
 }

-static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
+static bool ilk_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-	switch (port) {
-	case PORT_A:
+	switch (pin) {
+	case HPD_PORT_A:
 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
 	default:
 		return false;
 	}
 }

-static bool pch_port_hotplug_long_detect(enum port port, u32 val)
+static bool pch_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-	switch (port) {
-	case PORT_B:
+	switch (pin) {
+	case HPD_PORT_B:
 		return val & PORTB_HOTPLUG_LONG_DETECT;
-	case PORT_C:
+	case HPD_PORT_C:
 		return val & PORTC_HOTPLUG_LONG_DETECT;
-	case PORT_D:
+	case HPD_PORT_D:
 		return val & PORTD_HOTPLUG_LONG_DETECT;
 	default:
 		return false;
 	}
 }

-static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
+static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val)
 {
-	switch (port) {
-	case PORT_B:
+	switch (pin) {
+	case HPD_PORT_B:
 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
-	case PORT_C:
+	case HPD_PORT_C:
 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
-	case PORT_D:
+	case HPD_PORT_D:
 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
 	default:
 		return false;
@@ -1709,27 +1709,22 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
			       u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
-			       bool long_pulse_detect(enum port port, u32 val))
+			       bool long_pulse_detect(enum hpd_pin pin, u32 val))
 {
-	enum port port;
-	int i;
+	enum hpd_pin pin;

-	for_each_hpd_pin(i) {
-		if ((hpd[i] & hotplug_trigger) == 0)
+	for_each_hpd_pin(pin) {
+		if ((hpd[pin] & hotplug_trigger) == 0)
 			continue;

-		*pin_mask |= BIT(i);
+		*pin_mask |= BIT(pin);

-		port = intel_hpd_pin_to_port(dev_priv, i);
-		if (port == PORT_NONE)
-			continue;
-
-		if (long_pulse_detect(port, dig_hotplug_reg))
-			*long_mask |= BIT(i);
+		if (long_pulse_detect(pin, dig_hotplug_reg))
+			*long_mask |= BIT(pin);
 	}

-	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
-			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
+	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+			 hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
 }
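With the port-to-pin indirection gone, intel_get_hpd_pins() works purely in hpd_pin space: a triggered pin sets its bit in *pin_mask, and when the per-platform callback decodes the pulse as long, the same bit goes into *long_mask. A compact, self-contained model of that accumulation (pin count and numbering invented):

#include <stdbool.h>
#include <stdint.h>

#define EX_NUM_PINS 4

static void ex_get_hpd_pins(uint32_t *pin_mask, uint32_t *long_mask,
			    uint32_t trigger, const uint32_t hpd[EX_NUM_PINS],
			    uint32_t dig_reg,
			    bool (*long_detect)(int pin, uint32_t val))
{
	for (int pin = 0; pin < EX_NUM_PINS; pin++) {
		if (!(hpd[pin] & trigger))
			continue;

		*pin_mask |= 1u << pin;	/* pin fired */

		if (long_detect(pin, dig_reg))
			*long_mask |= 1u << pin;	/* ...with a long pulse */
	}
}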
@@ -44,10 +44,6 @@ i915_param_named(modeset, int, 0400,
	"Use kernel modesetting [KMS] (0=disable, "
	"1=on, -1=force vga console preference [default])");

-i915_param_named_unsafe(panel_ignore_lid, int, 0600,
-	"Override lid status (0=autodetect, 1=autodetect disabled [default], "
-	"-1=force lid closed, -2=force lid open)");
-
 i915_param_named_unsafe(enable_dc, int, 0400,
	"Enable power-saving display C-states. "
	"(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)");

@@ -92,7 +88,7 @@ i915_param_named_unsafe(enable_ppgtt, int, 0400,

 i915_param_named_unsafe(enable_psr, int, 0600,
	"Enable PSR "
-	"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
+	"(0=disabled, 1=enabled) "
	"Default: -1 (use per-chip default)");

 i915_param_named_unsafe(alpha_support, bool, 0400,

@@ -36,7 +36,6 @@ struct drm_printer;
 #define I915_PARAMS_FOR_EACH(param) \
	param(char *, vbt_firmware, NULL) \
	param(int, modeset, -1) \
-	param(int, panel_ignore_lid, 1) \
	param(int, lvds_channel_mode, 0) \
	param(int, panel_use_ssc, -1) \
	param(int, vbt_sdvo_panel_type, -1) \

@@ -674,10 +674,16 @@ MODULE_DEVICE_TABLE(pci, pciidlist);

 static void i915_pci_remove(struct pci_dev *pdev)
 {
-	struct drm_device *dev = pci_get_drvdata(pdev);
+	struct drm_device *dev;
+
+	dev = pci_get_drvdata(pdev);
+	if (!dev) /* driver load aborted, nothing to cleanup */
+		return;

 	i915_driver_unload(dev);
 	drm_dev_put(dev);
+
+	pci_set_drvdata(pdev, NULL);
 }

 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)

@@ -712,6 +718,11 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		return err;

+	if (i915_inject_load_failure()) {
+		i915_pci_remove(pdev);
+		return -ENODEV;
+	}
+
 	err = i915_live_selftests(pdev);
 	if (err) {
 		i915_pci_remove(pdev);
@@ -412,6 +412,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN8_STOLEN_RESERVED_4M		(2 << 7)
 #define GEN8_STOLEN_RESERVED_8M		(3 << 7)
 #define GEN6_STOLEN_RESERVED_ENABLE	(1 << 0)
+#define GEN11_STOLEN_RESERVED_ADDR_MASK	(0xFFFFFFFFFFFULL << 20)

 /* VGA stuff */

@@ -3122,6 +3123,7 @@ enum i915_power_well_id {
 #define   GMBUS_RATE_400KHZ	(2 << 8) /* reserved on Pineview */
 #define   GMBUS_RATE_1MHZ	(3 << 8) /* reserved on Pineview */
 #define   GMBUS_HOLD_EXT	(1 << 7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_BYTE_CNT_OVERRIDE (1 << 6)
 #define   GMBUS_PIN_DISABLED	0
 #define   GMBUS_PIN_SSC		1
 #define   GMBUS_PIN_VGADDC	2

@@ -3151,6 +3153,7 @@ enum i915_power_well_id {
 #define   GMBUS_CYCLE_STOP	(4 << 25)
 #define   GMBUS_BYTE_COUNT_SHIFT 16
 #define   GMBUS_BYTE_COUNT_MAX   256U
+#define   GEN9_GMBUS_BYTE_COUNT_MAX 511U
 #define   GMBUS_SLAVE_INDEX_SHIFT 8
 #define   GMBUS_SLAVE_ADDR_SHIFT 1
 #define   GMBUS_SLAVE_READ	(1 << 0)

@@ -4602,6 +4605,16 @@ enum {
 #define   VIDEO_DIP_ENABLE_GMP_HSW	(1 << 4)
 #define   VIDEO_DIP_ENABLE_SPD_HSW	(1 << 0)

+#define  DRM_DIP_ENABLE			(1 << 28)
+#define  PSR_VSC_BIT_7_SET		(1 << 27)
+#define  VSC_SELECT_MASK		(0x3 << 26)
+#define  VSC_SELECT_SHIFT		26
+#define  VSC_DIP_HW_HEA_DATA		(0 << 26)
+#define  VSC_DIP_HW_HEA_SW_DATA		(1 << 26)
+#define  VSC_DIP_HW_DATA_SW_HEA		(2 << 26)
+#define  VSC_DIP_SW_HEA_DATA		(3 << 26)
+#define  VDIP_ENABLE_PPS		(1 << 24)
+
 /* Panel power sequencing */
 #define PPS_BASE		0x61200
 #define VLV_PPS_BASE		(VLV_DISPLAY_BASE + PPS_BASE)

@@ -7665,6 +7678,110 @@ enum {

 #define SHOTPLUG_CTL_TC			_MMIO(0xc4034)
 #define   ICP_TC_HPD_ENABLE(tc_port)	(8 << (tc_port) * 4)
+/* Icelake DSC Rate Control Range Parameter Registers */
+#define DSCA_RC_RANGE_PARAMETERS_0		_MMIO(0x6B240)
+#define DSCA_RC_RANGE_PARAMETERS_0_UDW		_MMIO(0x6B240 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_0		_MMIO(0x6BA40)
+#define DSCC_RC_RANGE_PARAMETERS_0_UDW		_MMIO(0x6BA40 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PB	(0x78208)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB	(0x78208 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PB	(0x78308)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB	(0x78308 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_PC	(0x78408)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC	(0x78408 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_PC	(0x78508)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC	(0x78508 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_0_PB, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PB, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_0_PB, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_0_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PB, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW_PC)
+#define RC_BPG_OFFSET_SHIFT		10
+#define RC_MAX_QP_SHIFT			5
+#define RC_MIN_QP_SHIFT			0
+
+#define DSCA_RC_RANGE_PARAMETERS_1		_MMIO(0x6B248)
+#define DSCA_RC_RANGE_PARAMETERS_1_UDW		_MMIO(0x6B248 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_1		_MMIO(0x6BA48)
+#define DSCC_RC_RANGE_PARAMETERS_1_UDW		_MMIO(0x6BA48 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PB	(0x78210)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB	(0x78210 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PB	(0x78310)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB	(0x78310 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_PC	(0x78410)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC	(0x78410 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_PC	(0x78510)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC	(0x78510 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_1_PB, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PB, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_1_PB, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_1_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PB, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_2		_MMIO(0x6B250)
+#define DSCA_RC_RANGE_PARAMETERS_2_UDW		_MMIO(0x6B250 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_2		_MMIO(0x6BA50)
+#define DSCC_RC_RANGE_PARAMETERS_2_UDW		_MMIO(0x6BA50 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PB	(0x78218)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB	(0x78218 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PB	(0x78318)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB	(0x78318 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_PC	(0x78418)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC	(0x78418 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_PC	(0x78518)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC	(0x78518 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_2_PB, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PB, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_2_PB, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_2_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PB, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW_PC)
+
+#define DSCA_RC_RANGE_PARAMETERS_3		_MMIO(0x6B258)
+#define DSCA_RC_RANGE_PARAMETERS_3_UDW		_MMIO(0x6B258 + 4)
+#define DSCC_RC_RANGE_PARAMETERS_3		_MMIO(0x6BA58)
+#define DSCC_RC_RANGE_PARAMETERS_3_UDW		_MMIO(0x6BA58 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PB	(0x78220)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB	(0x78220 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PB	(0x78320)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB	(0x78320 + 4)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_PC	(0x78420)
+#define _ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC	(0x78420 + 4)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_PC	(0x78520)
+#define _ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC	(0x78520 + 4)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_3_PB, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PB, \
+						_ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_3_PB, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_3_PC)
+#define ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PB, \
+						_ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW_PC)
+
 #define   ICP_TC_HPD_LONG_DETECT(tc_port)	(2 << (tc_port) * 4)
 #define   ICP_TC_HPD_SHORT_DETECT(tc_port)	(1 << (tc_port) * 4)
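All of the per-pipe DSC macros above lean on the (pipe) - PIPE_B trick: these registers exist only for pipes B and C, so subtracting PIPE_B turns the pipe into a 0/1 index that selects the _PB or _PC address. A worked example with the enum values assumed to be PIPE_B == 1 and PIPE_C == 2:

#include <stdint.h>

#define EX_PIPE_B 1
#define EX_PIPE_C 2

/* simplified two-register _MMIO_PIPE: index 0 picks a, index 1 picks b */
static uint32_t ex_mmio_pipe(int index, uint32_t a, uint32_t b)
{
	return index ? b : a;
}

/* ex_mmio_pipe(EX_PIPE_C - EX_PIPE_B, 0x78208, 0x78408) == 0x78408,
 * i.e. pipe C resolves to the _PC register bank. */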
@@ -7840,12 +7957,25 @@ enum {
 #define _HSW_VIDEO_DIP_VSC_ECC_B	0x61344
 #define _HSW_VIDEO_DIP_GCP_B		0x61210

+/* Icelake PPS_DATA and _ECC DIP Registers.
+ * These are available for transcoders B,C and eDP.
+ * Adding the _A so as to reuse the _MMIO_TRANS2
+ * definition, with which it offsets to the right location.
+ */
+
+#define _ICL_VIDEO_DIP_PPS_DATA_A	0x60350
+#define _ICL_VIDEO_DIP_PPS_DATA_B	0x61350
+#define _ICL_VIDEO_DIP_PPS_ECC_A	0x603D4
+#define _ICL_VIDEO_DIP_PPS_ECC_B	0x613D4
+
 #define HSW_TVIDEO_DIP_CTL(trans)		_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
 #define HSW_TVIDEO_DIP_AVI_DATA(trans, i)	_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
 #define HSW_TVIDEO_DIP_VS_DATA(trans, i)	_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
 #define HSW_TVIDEO_DIP_SPD_DATA(trans, i)	_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
 #define HSW_TVIDEO_DIP_GCP(trans)		_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
 #define HSW_TVIDEO_DIP_VSC_DATA(trans, i)	_MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_DATA(trans, i)	_MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
+#define ICL_VIDEO_DIP_PPS_ECC(trans, i)		_MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)

 #define _HSW_STEREO_3D_CTL_A		0x70020
 #define   S3D_ENABLE			(1 << 31)
@ -10218,4 +10348,310 @@ enum skl_power_gate {
|
|||
_ICL_PHY_MISC_B)
|
||||
#define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23)
|
||||
|
||||
/* Icelake Display Stream Compression Registers */
|
||||
#define DSCA_PICTURE_PARAMETER_SET_0 0x6B200
|
||||
#define DSCC_PICTURE_PARAMETER_SET_0 0x6BA00
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PB 0x78270
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PB 0x78370
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_0_PC 0x78470
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_0_PC 0x78570
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_0_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_0_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_0_PC)
|
||||
#define DSC_VBR_ENABLE (1 << 19)
|
||||
#define DSC_422_ENABLE (1 << 18)
|
||||
#define DSC_COLOR_SPACE_CONVERSION (1 << 17)
|
||||
#define DSC_BLOCK_PREDICTION (1 << 16)
|
||||
#define DSC_LINE_BUF_DEPTH_SHIFT 12
|
||||
#define DSC_BPC_SHIFT 8
|
||||
#define DSC_VER_MIN_SHIFT 4
|
||||
#define DSC_VER_MAJ (0x1 << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_1 0x6B204
|
||||
#define DSCC_PICTURE_PARAMETER_SET_1 0x6BA04
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PB 0x78274
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PB 0x78374
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_1_PC 0x78474
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_1_PC 0x78574
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_1_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_1_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_1_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_1_PC)
|
||||
#define DSC_BPP(bpp) ((bpp) << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_2 0x6B208
|
||||
#define DSCC_PICTURE_PARAMETER_SET_2 0x6BA08
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PB 0x78278
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PB 0x78378
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_2_PC 0x78478
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_2_PC 0x78578
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_2_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_2_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_2_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_2_PC)
|
||||
#define DSC_PIC_WIDTH(pic_width) ((pic_width) << 16)
|
||||
#define DSC_PIC_HEIGHT(pic_height) ((pic_height) << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_3 0x6B20C
|
||||
#define DSCC_PICTURE_PARAMETER_SET_3 0x6BA0C
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PB 0x7827C
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PB 0x7837C
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_3_PC 0x7847C
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_3_PC 0x7857C
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_3_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_3_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_3_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_3_PC)
|
||||
#define DSC_SLICE_WIDTH(slice_width) ((slice_width) << 16)
|
||||
#define DSC_SLICE_HEIGHT(slice_height) ((slice_height) << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_4 0x6B210
|
||||
#define DSCC_PICTURE_PARAMETER_SET_4 0x6BA10
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PB 0x78280
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PB 0x78380
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_4_PC 0x78480
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_4_PC 0x78580
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_4_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_4_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_4_PC)
|
||||
#define DSC_INITIAL_DEC_DELAY(dec_delay) ((dec_delay) << 16)
|
||||
#define DSC_INITIAL_XMIT_DELAY(xmit_delay) ((xmit_delay) << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_5 0x6B214
|
||||
#define DSCC_PICTURE_PARAMETER_SET_5 0x6BA14
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PB 0x78284
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PB 0x78384
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_5_PC 0x78484
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_5_PC 0x78584
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_5_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_5_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_5_PC, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_5_PC)
|
||||
#define DSC_SCALE_DEC_INTINT(scale_dec) ((scale_dec) << 16)
|
||||
#define DSC_SCALE_INC_INT(scale_inc) ((scale_inc) << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_6 0x6B218
|
||||
#define DSCC_PICTURE_PARAMETER_SET_6 0x6BA18
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PB 0x78288
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PB 0x78388
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_6_PC 0x78488
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_6_PC 0x78588
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_6_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_6_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_6_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_6_PC)
|
||||
#define DSC_FLATNESS_MAX_QP(max_qp) (qp << 24)
|
||||
#define DSC_FLATNESS_MIN_QP(min_qp) (qp << 16)
|
||||
#define DSC_FIRST_LINE_BPG_OFFSET(offset) ((offset) << 8)
|
||||
#define DSC_INITIAL_SCALE_VALUE(value) ((value) << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_7 0x6B21C
|
||||
#define DSCC_PICTURE_PARAMETER_SET_7 0x6BA1C
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PB 0x7828C
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PB 0x7838C
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_7_PC 0x7848C
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_7_PC 0x7858C
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_7_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_7_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_7_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_7_PC)
|
||||
#define DSC_NFL_BPG_OFFSET(bpg_offset) ((bpg_offset) << 16)
|
||||
#define DSC_SLICE_BPG_OFFSET(bpg_offset) ((bpg_offset) << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_8 0x6B220
|
||||
#define DSCC_PICTURE_PARAMETER_SET_8 0x6BA20
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PB 0x78290
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PB 0x78390
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_8_PC 0x78490
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_8_PC 0x78590
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_8_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_8_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_8_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_8_PC)
|
||||
#define DSC_INITIAL_OFFSET(initial_offset) ((initial_offset) << 16)
|
||||
#define DSC_FINAL_OFFSET(final_offset) ((final_offset) << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_9 0x6B224
|
||||
#define DSCC_PICTURE_PARAMETER_SET_9 0x6BA24
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PB 0x78294
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PB 0x78394
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_9_PC 0x78494
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_9_PC 0x78594
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_9_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_9_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_9_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_9_PC)
|
||||
#define DSC_RC_EDGE_FACTOR(rc_edge_fact) ((rc_edge_fact) << 16)
|
||||
#define DSC_RC_MODEL_SIZE(rc_model_size) ((rc_model_size) << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_10 0x6B228
|
||||
#define DSCC_PICTURE_PARAMETER_SET_10 0x6BA28
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PB 0x78298
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PB 0x78398
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_10_PC 0x78498
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_10_PC 0x78598
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_10_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_10_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_10_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_10_PC)
|
||||
#define DSC_RC_TARGET_OFF_LOW(rc_tgt_off_low) ((rc_tgt_off_low) << 20)
|
||||
#define DSC_RC_TARGET_OFF_HIGH(rc_tgt_off_high) ((rc_tgt_off_high) << 16)
|
||||
#define DSC_RC_QUANT_INC_LIMIT1(lim) ((lim) << 8)
|
||||
#define DSC_RC_QUANT_INC_LIMIT0(lim) ((lim) << 0)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_11 0x6B22C
|
||||
#define DSCC_PICTURE_PARAMETER_SET_11 0x6BA2C
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PB 0x7829C
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PB 0x7839C
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_11_PC 0x7849C
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_11_PC 0x7859C
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_11_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_11_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_11(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_11_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_11_PC)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_12 0x6B260
|
||||
#define DSCC_PICTURE_PARAMETER_SET_12 0x6BA60
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PB 0x782A0
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PB 0x783A0
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_12_PC 0x784A0
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_12_PC 0x785A0
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_12_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_12_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_12(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_12_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_12_PC)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_13 0x6B264
|
||||
#define DSCC_PICTURE_PARAMETER_SET_13 0x6BA64
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PB 0x782A4
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PB 0x783A4
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_13_PC 0x784A4
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_13_PC 0x785A4
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_13_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_13_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_13(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_13_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_13_PC)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_14 0x6B268
|
||||
#define DSCC_PICTURE_PARAMETER_SET_14 0x6BA68
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PB 0x782A8
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PB 0x783A8
|
||||
#define _ICL_DSC0_PICTURE_PARAMETER_SET_14_PC 0x784A8
|
||||
#define _ICL_DSC1_PICTURE_PARAMETER_SET_14_PC 0x785A8
|
||||
#define ICL_DSC0_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_14_PB, \
|
||||
_ICL_DSC0_PICTURE_PARAMETER_SET_14_PC)
|
||||
#define ICL_DSC1_PICTURE_PARAMETER_SET_14(pipe) _MMIO_PIPE((pipe) - PIPE_B, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_14_PB, \
|
||||
_ICL_DSC1_PICTURE_PARAMETER_SET_14_PC)
|
||||
|
||||
#define DSCA_PICTURE_PARAMETER_SET_15 0x6B26C
|
||||
+#define DSCC_PICTURE_PARAMETER_SET_15		0x6BA6C
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PB	0x782AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PB	0x783AC
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_15_PC	0x784AC
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_15_PC	0x785AC
+#define ICL_DSC0_PICTURE_PARAMETER_SET_15(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_PICTURE_PARAMETER_SET_15_PB, \
+						_ICL_DSC0_PICTURE_PARAMETER_SET_15_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_15(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_PICTURE_PARAMETER_SET_15_PB, \
+						_ICL_DSC1_PICTURE_PARAMETER_SET_15_PC)
+
+#define DSCA_PICTURE_PARAMETER_SET_16		0x6B270
+#define DSCC_PICTURE_PARAMETER_SET_16		0x6BA70
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PB	0x782B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PB	0x783B0
+#define _ICL_DSC0_PICTURE_PARAMETER_SET_16_PC	0x784B0
+#define _ICL_DSC1_PICTURE_PARAMETER_SET_16_PC	0x785B0
+#define ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_PICTURE_PARAMETER_SET_16_PB, \
+						_ICL_DSC0_PICTURE_PARAMETER_SET_16_PC)
+#define ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_PICTURE_PARAMETER_SET_16_PB, \
+						_ICL_DSC1_PICTURE_PARAMETER_SET_16_PC)
+#define DSC_SLICE_PER_LINE(slice_per_line)	((slice_per_line) << 16)
+#define DSC_SLICE_CHUNK_SIZE(slice_chunk_size)	((slice_chunk_size) << 0)
+
+/* Icelake Rate Control Buffer Threshold Registers */
+#define DSCA_RC_BUF_THRESH_0			_MMIO(0x6B230)
+#define DSCA_RC_BUF_THRESH_0_UDW		_MMIO(0x6B230 + 4)
+#define DSCC_RC_BUF_THRESH_0			_MMIO(0x6BA30)
+#define DSCC_RC_BUF_THRESH_0_UDW		_MMIO(0x6BA30 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PB		(0x78254)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PB	(0x78254 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PB		(0x78354)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PB	(0x78354 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_0_PC		(0x78454)
+#define _ICL_DSC0_RC_BUF_THRESH_0_UDW_PC	(0x78454 + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_0_PC		(0x78554)
+#define _ICL_DSC1_RC_BUF_THRESH_0_UDW_PC	(0x78554 + 4)
+#define ICL_DSC0_RC_BUF_THRESH_0(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_BUF_THRESH_0_PB, \
+						_ICL_DSC0_RC_BUF_THRESH_0_PC)
+#define ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_BUF_THRESH_0_UDW_PB, \
+						_ICL_DSC0_RC_BUF_THRESH_0_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_BUF_THRESH_0_PB, \
+						_ICL_DSC1_RC_BUF_THRESH_0_PC)
+#define ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_BUF_THRESH_0_UDW_PB, \
+						_ICL_DSC1_RC_BUF_THRESH_0_UDW_PC)
+
+#define DSCA_RC_BUF_THRESH_1			_MMIO(0x6B238)
+#define DSCA_RC_BUF_THRESH_1_UDW		_MMIO(0x6B238 + 4)
+#define DSCC_RC_BUF_THRESH_1			_MMIO(0x6BA38)
+#define DSCC_RC_BUF_THRESH_1_UDW		_MMIO(0x6BA38 + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PB		(0x7825C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PB	(0x7825C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PB		(0x7835C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PB	(0x7835C + 4)
+#define _ICL_DSC0_RC_BUF_THRESH_1_PC		(0x7845C)
+#define _ICL_DSC0_RC_BUF_THRESH_1_UDW_PC	(0x7845C + 4)
+#define _ICL_DSC1_RC_BUF_THRESH_1_PC		(0x7855C)
+#define _ICL_DSC1_RC_BUF_THRESH_1_UDW_PC	(0x7855C + 4)
+#define ICL_DSC0_RC_BUF_THRESH_1(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_BUF_THRESH_1_PB, \
+						_ICL_DSC0_RC_BUF_THRESH_1_PC)
+#define ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC0_RC_BUF_THRESH_1_UDW_PB, \
+						_ICL_DSC0_RC_BUF_THRESH_1_UDW_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1(pipe)		_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_BUF_THRESH_1_PB, \
+						_ICL_DSC1_RC_BUF_THRESH_1_PC)
+#define ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe)	_MMIO_PIPE((pipe) - PIPE_B, \
+						_ICL_DSC1_RC_BUF_THRESH_1_UDW_PB, \
+						_ICL_DSC1_RC_BUF_THRESH_1_UDW_PC)

 #endif /* _I915_REG_H_ */
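The *_PB/*_PC pairs above follow the usual i915 pattern of deriving a per-pipe register address from the first two instances: _MMIO_PIPE(index, a, b) computes a + index * (b - a), and the (pipe) - PIPE_B rebasing makes pipe B index 0. A minimal standalone sketch of that address arithmetic, using two addresses taken from the DSC defines above (pipe_reg() is an invented helper, not a kernel API):

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the i915 _PIPE()/_MMIO_PIPE() trick: given the
 * register addresses of the first two instances, instance n lives at
 * a + n * (b - a), because instances are evenly spaced. */
static uint32_t pipe_reg(int pipe, uint32_t a, uint32_t b)
{
        return a + (uint32_t)pipe * (b - a);
}

int main(void)
{
        /* From the defines above: DSC0 PICTURE_PARAMETER_SET_16, pipes B/C. */
        uint32_t pb = 0x782B0, pc = 0x784B0;

        for (int pipe = 0; pipe < 2; pipe++)    /* 0 = pipe B, 1 = pipe C */
                printf("pipe %c: 0x%05X\n", 'B' + pipe,
                       pipe_reg(pipe, pb, pc));
        return 0;
}

With only two pipes the formula degenerates to picking a or b, but the same macro extends naturally if a third evenly spaced instance appears.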
@@ -99,6 +99,6 @@ __printf(2, 3)
 bool __igt_timeout(unsigned long timeout, const char *fmt, ...);

 #define igt_timeout(t, fmt, ...) \
-	__igt_timeout((t), KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+	__igt_timeout((t), KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)

 #endif /* !__I915_SELFTEST_H__ */
@@ -942,6 +942,14 @@ static struct i915_gem_active *active_instance(struct i915_vma *vma, u64 idx)
 	}

 	active = kmalloc(sizeof(*active), GFP_KERNEL);
+
+	/* kmalloc may retire the vma->last_active request (thanks shrinker)! */
+	if (unlikely(!i915_gem_active_raw(&vma->last_active,
+					  &vma->vm->i915->drm.struct_mutex))) {
+		kfree(active);
+		goto out;
+	}
+
 	if (unlikely(!active))
 		return ERR_PTR(-ENOMEM);
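The hazard this hunk closes: a GFP_KERNEL kmalloc() can enter direct reclaim, which can invoke the i915 shrinker, which can retire requests and drop vma->last_active behind the caller's back — so the tracker must be revalidated after the allocation, not before. A toy userspace model of the same allocate-then-revalidate shape (all names here are invented for illustration; the reclaim is simulated by an explicit hook):

#include <stdio.h>
#include <stdlib.h>

static int *last_active;	/* cached pointer that reclaim may retire */

static void shrink(void)	/* stands in for the shrinker */
{
	free(last_active);
	last_active = NULL;
}

static void *alloc_may_reclaim(size_t sz)
{
	shrink();		/* worst case: allocation always reclaims */
	return malloc(sz);
}

int main(void)
{
	last_active = malloc(sizeof(*last_active));

	int *node = alloc_may_reclaim(sizeof(*node));

	/* The equivalent of the re-check added above: the cached pointer
	 * may have been retired by the allocation itself. */
	if (!last_active) {
		free(node);	/* reuse the now-vacant cached slot instead */
		puts("cache retired during allocation, slot reused");
		return 0;
	}
	free(node);
	free(last_active);
	return 0;
}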
@@ -1808,15 +1808,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
 	I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
 }

-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-				       enum transcoder cpu_transcoder)
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
 	i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
 	uint32_t val = I915_READ(reg);

 	val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
 	val |= TRANS_DDI_PORT_NONE;
 	I915_WRITE(reg, val);
+
+	if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+		DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+		/* Quirk time at 100ms for reliable operation */
+		msleep(100);
+	}
 }

 int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,

@@ -3630,7 +3639,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
 			goto err;

 		intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-		dev_priv->hotplug.irq_port[port] = intel_dig_port;
 	}

 	/* In theory we don't need the encoder->type check, but leave it just in
@@ -5837,7 +5837,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
 		intel_ddi_set_vc_payload_alloc(old_crtc_state, false);

 	if (!transcoder_is_dsi(cpu_transcoder))
-		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+		intel_ddi_disable_transcoder_func(old_crtc_state);

 	if (INTEL_GEN(dev_priv) >= 9)
 		skylake_scaler_disable(intel_crtc);

@@ -14849,6 +14849,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
 	DRM_INFO("Applying T12 delay quirk\n");
 }

+/*
+ * GeminiLake NUC HDMI outputs require additional off time
+ * to allow the onboard retimer to correctly sync to the signal.
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+	DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
 struct intel_quirk {
 	int device;
 	int subsystem_vendor;

@@ -14935,6 +14947,13 @@ static struct intel_quirk intel_quirks[] = {

 	/* Toshiba Satellite P50-C-18C */
 	{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+	/* GeminiLake NUC */
+	{ 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+	/* ASRock ITX */
+	{ 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+	{ 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
 };

 static void intel_init_quirks(struct drm_device *dev)

@@ -15890,6 +15909,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);

+	flush_workqueue(dev_priv->modeset_wq);
+
 	flush_work(&dev_priv->atomic_helper.free_work);
 	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
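The quirk table above keys each workaround on a PCI device ID plus subsystem vendor/device pair, so the same GLK silicon only gets the extra DDI off-time on boards known to carry the retimer. A self-contained sketch of that table-driven matching (the real intel_init_quirks() walker also supports wildcard subsystem IDs, which is omitted here; names below are invented):

#include <stdint.h>
#include <stdio.h>

struct quirk {
        uint16_t device, sub_vendor, sub_device;
        const char *name;
};

static const struct quirk quirks[] = {
        { 0x3185, 0x8086, 0x2072, "increase DDI disabled time" }, /* GLK NUC */
        { 0x3185, 0x1849, 0x2212, "increase DDI disabled time" }, /* ASRock */
};

int main(void)
{
        /* Pretend probe data for a GeminiLake NUC. */
        uint16_t dev = 0x3185, subv = 0x8086, subd = 0x2072;

        for (size_t i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                if (quirks[i].device == dev &&
                    quirks[i].sub_vendor == subv &&
                    quirks[i].sub_device == subd)
                        printf("applying quirk: %s\n", quirks[i].name);
        return 0;
}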
@@ -289,6 +289,10 @@ struct intel_link_m_n {
 			    &(dev)->mode_config.encoder_list,	\
 			    base.head)

+#define for_each_intel_dp(dev, intel_encoder)			\
+	for_each_intel_encoder(dev, intel_encoder)		\
+		for_each_if(intel_encoder_is_dp(intel_encoder))
+
 #define for_each_intel_connector_iter(intel_connector, iter) \
 	while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
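The new iterator simply composes the existing encoder walk with a predicate; the intel_dp.c hunks below then replace several open-coded encoder->type checks with it. DRM's for_each_if() is defined as "if (!(condition)) {} else", so the loop body binds to the else branch and the macro stays safe inside surrounding if/else chains. A tiny runnable illustration of the composition pattern (for_each_even() is invented; for_each_if() mirrors the kernel definition):

#include <stdio.h>

#define for_each_if(cond) if (!(cond)) {} else

#define for_each_even(i, n) \
        for ((i) = 0; (i) < (n); (i)++) \
                for_each_if((i) % 2 == 0)

int main(void)
{
        int i;

        for_each_even(i, 10)
                printf("%d ", i);       /* prints: 0 2 4 6 8 */
        printf("\n");
        return 0;
}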
@@ -600,14 +600,8 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
 	 * We don't have power sequencer currently.
 	 * Pick one that's not used by other ports.
 	 */
-	for_each_intel_encoder(&dev_priv->drm, encoder) {
-		struct intel_dp *intel_dp;
-
-		if (encoder->type != INTEL_OUTPUT_DP &&
-		    encoder->type != INTEL_OUTPUT_EDP)
-			continue;
-
-		intel_dp = enc_to_intel_dp(&encoder->base);
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

 		if (encoder->type == INTEL_OUTPUT_EDP) {
 			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&

@@ -799,19 +793,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
 	 * should use them always.
 	 */

-	for_each_intel_encoder(&dev_priv->drm, encoder) {
-		struct intel_dp *intel_dp;
-
-		if (encoder->type != INTEL_OUTPUT_DP &&
-		    encoder->type != INTEL_OUTPUT_EDP &&
-		    encoder->type != INTEL_OUTPUT_DDI)
-			continue;
-
-		intel_dp = enc_to_intel_dp(&encoder->base);
-
-		/* Skip pure DVI/HDMI DDI encoders */
-		if (!i915_mmio_reg_valid(intel_dp->output_reg))
-			continue;
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

 		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

@@ -2830,10 +2813,6 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
 			   const struct intel_crtc_state *old_crtc_state,
 			   const struct drm_connector_state *old_conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
-	intel_psr_disable(intel_dp, old_crtc_state);
-
 	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }

@@ -3046,10 +3025,7 @@ static void vlv_enable_dp(struct intel_encoder *encoder,
 			  const struct intel_crtc_state *pipe_config,
 			  const struct drm_connector_state *conn_state)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
-
 	intel_edp_backlight_on(pipe_config, conn_state);
-	intel_psr_enable(intel_dp, pipe_config);
 }

 static void g4x_pre_enable_dp(struct intel_encoder *encoder,

@@ -3104,16 +3080,9 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,

 	lockdep_assert_held(&dev_priv->pps_mutex);

-	for_each_intel_encoder(&dev_priv->drm, encoder) {
-		struct intel_dp *intel_dp;
-		enum port port;
-
-		if (encoder->type != INTEL_OUTPUT_DP &&
-		    encoder->type != INTEL_OUTPUT_EDP)
-			continue;
-
-		intel_dp = enc_to_intel_dp(&encoder->base);
-		port = dp_to_dig_port(intel_dp)->base.port;
+	for_each_intel_dp(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+		enum port port = encoder->port;

 		WARN(intel_dp->active_pipe == pipe,
 		     "stealing pipe %c power sequencer from active (e)DP port %c\n",

@@ -3905,129 +3874,6 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
 					intel_dp->is_mst);
 }

-static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp,
-				  struct intel_crtc_state *crtc_state, bool disable_wa)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-	u8 buf;
-	int ret = 0;
-	int count = 0;
-	int attempts = 10;
-
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
-		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
-		ret = -EIO;
-		goto out;
-	}
-
-	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-			       buf & ~DP_TEST_SINK_START) < 0) {
-		DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
-		ret = -EIO;
-		goto out;
-	}
-
-	do {
-		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-
-		if (drm_dp_dpcd_readb(&intel_dp->aux,
-				      DP_TEST_SINK_MISC, &buf) < 0) {
-			ret = -EIO;
-			goto out;
-		}
-		count = buf & DP_TEST_COUNT_MASK;
-	} while (--attempts && count);
-
-	if (attempts == 0) {
-		DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
-		ret = -ETIMEDOUT;
-	}
-
- out:
-	if (disable_wa)
-		hsw_enable_ips(crtc_state);
-	return ret;
-}
-
-static int intel_dp_sink_crc_start(struct intel_dp *intel_dp,
-				   struct intel_crtc_state *crtc_state)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-	u8 buf;
-	int ret;
-
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
-		return -EIO;
-
-	if (!(buf & DP_TEST_CRC_SUPPORTED))
-		return -ENOTTY;
-
-	if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
-		return -EIO;
-
-	if (buf & DP_TEST_SINK_START) {
-		ret = intel_dp_sink_crc_stop(intel_dp, crtc_state, false);
-		if (ret)
-			return ret;
-	}
-
-	hsw_disable_ips(crtc_state);
-
-	if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
-			       buf | DP_TEST_SINK_START) < 0) {
-		hsw_enable_ips(crtc_state);
-		return -EIO;
-	}
-
-	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-	return 0;
-}
-
-int intel_dp_sink_crc(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, u8 *crc)
-{
-	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
-	u8 buf;
-	int count, ret;
-	int attempts = 6;
-
-	ret = intel_dp_sink_crc_start(intel_dp, crtc_state);
-	if (ret)
-		return ret;
-
-	do {
-		intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
-
-		if (drm_dp_dpcd_readb(&intel_dp->aux,
-				      DP_TEST_SINK_MISC, &buf) < 0) {
-			ret = -EIO;
-			goto stop;
-		}
-		count = buf & DP_TEST_COUNT_MASK;
-
-	} while (--attempts && count == 0);
-
-	if (attempts == 0) {
-		DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
-		ret = -ETIMEDOUT;
-		goto stop;
-	}
-
-	if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
-		ret = -EIO;
-		goto stop;
-	}
-
- stop:
-	intel_dp_sink_crc_stop(intel_dp, crtc_state, true);
-	return ret;
-}
-
 static bool
 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
 {

@@ -4563,14 +4409,7 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
 static enum drm_connector_status
 edp_detect(struct intel_dp *intel_dp)
 {
-	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
-	enum drm_connector_status status;
-
-	status = intel_panel_detect(dev_priv);
-	if (status == connector_status_unknown)
-		status = connector_status_connected;
-
-	return status;
+	return connector_status_connected;
 }

 static bool ibx_digital_port_connected(struct intel_encoder *encoder)

@@ -4833,7 +4672,7 @@ intel_dp_long_pulse(struct intel_connector *connector)

 	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

-	/* Can't disconnect eDP, but you can close the lid... */
+	/* Can't disconnect eDP */
 	if (intel_dp_is_edp(intel_dp))
 		status = edp_detect(intel_dp);
 	else if (intel_digital_port_connected(&dp_to_dig_port(intel_dp)->base))

@@ -6508,7 +6347,6 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
 	intel_encoder->port = port;

 	intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
-	dev_priv->hotplug.irq_port[port] = intel_dig_port;

 	if (port != PORT_A)
 		intel_infoframe_init(intel_dig_port);

@@ -6527,37 +6365,44 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
 	return false;
 }

-void intel_dp_mst_suspend(struct drm_device *dev)
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int i;
+	struct intel_encoder *encoder;

 	/* disable MST */
-	for (i = 0; i < I915_MAX_PORTS; i++) {
-		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+	for_each_intel_encoder(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp;

-		if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+		if (encoder->type != INTEL_OUTPUT_DDI)
 			continue;

-		if (intel_dig_port->dp.is_mst)
-			drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
+		intel_dp = enc_to_intel_dp(&encoder->base);
+
+		if (!intel_dp->can_mst)
+			continue;
+
+		if (intel_dp->is_mst)
+			drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
 	}
 }

-void intel_dp_mst_resume(struct drm_device *dev)
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int i;
+	struct intel_encoder *encoder;

-	for (i = 0; i < I915_MAX_PORTS; i++) {
-		struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
+	for_each_intel_encoder(&dev_priv->drm, encoder) {
+		struct intel_dp *intel_dp;
 		int ret;

-		if (!intel_dig_port || !intel_dig_port->dp.can_mst)
+		if (encoder->type != INTEL_OUTPUT_DDI)
 			continue;

-		ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
+		intel_dp = enc_to_intel_dp(&encoder->base);
+
+		if (!intel_dp->can_mst)
+			continue;
+
+		ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
 		if (ret)
-			intel_dp_check_mst_status(&intel_dig_port->dp);
+			intel_dp_check_mst_status(intel_dp);
 	}
 }
@@ -304,6 +304,8 @@ struct intel_panel {
 	} backlight;
 };

+struct intel_digital_port;
+
 /*
  * This structure serves as a translation layer between the generic HDCP code
  * and the bus-specific code. What that means is that HDCP over HDMI differs

@@ -1246,23 +1248,29 @@ intel_attached_encoder(struct drm_connector *connector)
 	return to_intel_connector(connector)->encoder;
 }

+static inline bool intel_encoder_is_dig_port(struct intel_encoder *encoder)
+{
+	switch (encoder->type) {
+	case INTEL_OUTPUT_DDI:
+	case INTEL_OUTPUT_DP:
+	case INTEL_OUTPUT_EDP:
+	case INTEL_OUTPUT_HDMI:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static inline struct intel_digital_port *
 enc_to_dig_port(struct drm_encoder *encoder)
 {
 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

-	switch (intel_encoder->type) {
-	case INTEL_OUTPUT_DDI:
-		WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
-		/* fall through */
-	case INTEL_OUTPUT_DP:
-	case INTEL_OUTPUT_EDP:
-	case INTEL_OUTPUT_HDMI:
+	if (intel_encoder_is_dig_port(intel_encoder))
 		return container_of(encoder, struct intel_digital_port,
 				    base.base);
-	default:
+	else
 		return NULL;
-	}
 }

 static inline struct intel_dp_mst_encoder *

@@ -1276,6 +1284,20 @@ static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
 	return &enc_to_dig_port(encoder)->dp;
 }

+static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
+{
+	switch (encoder->type) {
+	case INTEL_OUTPUT_DP:
+	case INTEL_OUTPUT_EDP:
+		return true;
+	case INTEL_OUTPUT_DDI:
+		/* Skip pure HDMI/DVI DDI encoders */
+		return i915_mmio_reg_valid(enc_to_intel_dp(&encoder->base)->output_reg);
+	default:
+		return false;
+	}
+}
+
 static inline struct intel_digital_port *
 dp_to_dig_port(struct intel_dp *intel_dp)
 {

@@ -1332,9 +1354,6 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv);
 void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv);

 /* i915_irq.c */
-bool gen11_reset_one_iir(struct drm_i915_private * const i915,
-			 const unsigned int bank,
-			 const unsigned int bit);
 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
 void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask);

@@ -1385,8 +1404,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
 void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
 bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
 void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
-				       enum transcoder cpu_transcoder);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
 void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
 void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
 void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);

@@ -1665,8 +1683,6 @@ void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
 void intel_dp_encoder_reset(struct drm_encoder *encoder);
 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
-int intel_dp_sink_crc(struct intel_dp *intel_dp,
-		      struct intel_crtc_state *crtc_state, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state);

@@ -1680,8 +1696,8 @@ void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
 void intel_edp_panel_on(struct intel_dp *intel_dp);
 void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_mst_suspend(struct drm_device *dev);
-void intel_dp_mst_resume(struct drm_device *dev);
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
 int intel_dp_max_link_rate(struct intel_dp *intel_dp);
 int intel_dp_max_lane_count(struct intel_dp *intel_dp);
 int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);

@@ -1874,7 +1890,6 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
 				  const struct drm_connector_state *conn_state);
 void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
 void intel_panel_destroy_backlight(struct drm_connector *connector);
-enum drm_connector_status intel_panel_detect(struct drm_i915_private *dev_priv);
 extern struct drm_display_mode *intel_find_panel_downclock(
 				struct drm_i915_private *dev_priv,
 				struct drm_display_mode *fixed_mode,

@@ -1923,7 +1938,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
 void intel_psr_irq_control(struct drm_i915_private *dev_priv, bool debug);
 void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
 void intel_psr_short_pulse(struct intel_dp *intel_dp);
-int intel_psr_wait_for_idle(struct drm_i915_private *dev_priv);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state);

 /* intel_runtime_pm.c */
 int intel_power_domains_init(struct drm_i915_private *);
@@ -467,8 +467,7 @@ static void intel_engine_init_execlist(struct intel_engine_cs *engine)
 	GEM_BUG_ON(execlists_num_ports(execlists) > EXECLIST_MAX_PORTS);

 	execlists->queue_priority = INT_MIN;
-	execlists->queue = RB_ROOT;
-	execlists->first = NULL;
+	execlists->queue = RB_ROOT_CACHED;
 }

 /**

@@ -990,21 +989,23 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)

 	/* Waiting to drain ELSP? */
 	if (READ_ONCE(engine->execlists.active)) {
-		struct intel_engine_execlists *execlists = &engine->execlists;
+		struct tasklet_struct *t = &engine->execlists.tasklet;

 		local_bh_disable();
-		if (tasklet_trylock(&execlists->tasklet)) {
-			execlists->tasklet.func(execlists->tasklet.data);
-			tasklet_unlock(&execlists->tasklet);
+		if (tasklet_trylock(t)) {
+			/* Must wait for any GPU reset in progress. */
+			if (__tasklet_is_enabled(t))
+				t->func(t->data);
+			tasklet_unlock(t);
 		}
 		local_bh_enable();

-		if (READ_ONCE(execlists->active))
+		if (READ_ONCE(engine->execlists.active))
 			return false;
 	}

 	/* ELSP is empty, but there are ready requests? E.g. after reset */
-	if (READ_ONCE(engine->execlists.first))
+	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
 		return false;

 	/* Ring stopped? */

@@ -1540,7 +1541,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	last = NULL;
 	count = 0;
 	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
-	for (rb = execlists->first; rb; rb = rb_next(rb)) {
+	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
 		struct i915_priolist *p =
 			rb_entry(rb, typeof(*p), node);
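These hunks (and the intel_lrc.c ones further down) convert the execlists queue from a plain rb_root plus a hand-maintained "first" pointer to rb_root_cached, where the rbtree itself tracks its leftmost node so the highest-priority level is readable in O(1). A toy standalone model of the cached-leftmost idea, using an unbalanced BST in place of a red-black tree (names invented; the kernel's rb_insert_color_cached()/rb_first_cached() maintain the same invariant):

#include <stdio.h>

struct node { int key; struct node *left, *right; };
struct root_cached { struct node *root, *leftmost; };

static void insert_cached(struct root_cached *t, struct node *n)
{
        struct node **p = &t->root;
        int is_leftmost = 1;    /* stays true while we only descend left */

        while (*p) {
                if (n->key < (*p)->key) {
                        p = &(*p)->left;
                } else {
                        p = &(*p)->right;
                        is_leftmost = 0;
                }
        }
        *p = n;
        if (is_leftmost)
                t->leftmost = n;        /* keep the O(1) cache up to date */
}

int main(void)
{
        struct root_cached t = { 0 };
        struct node a = { 5 }, b = { 2 }, c = { 8 };

        insert_cached(&t, &a);
        insert_cached(&t, &b);
        insert_cached(&t, &c);
        printf("min = %d\n", t.leftmost->key);  /* 2, no tree descent */
        return 0;
}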
@@ -466,11 +466,13 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
 	 * could happen that GuC sets the bit for 2nd interrupt but Host
 	 * clears out the bit on handling the 1st interrupt.
 	 */
+	disable_rpm_wakeref_asserts(dev_priv);
 	spin_lock(&guc->irq_lock);
 	val = I915_READ(SOFT_SCRATCH(15));
 	msg = val & guc->msg_enabled_mask;
 	I915_WRITE(SOFT_SCRATCH(15), val & ~msg);
 	spin_unlock(&guc->irq_lock);
+	enable_rpm_wakeref_asserts(dev_priv);

 	intel_guc_to_host_process_recv_msg(guc, msg);
 }
@@ -628,13 +628,14 @@ static void complete_preempt_context(struct intel_engine_cs *engine)

 	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));

+	if (inject_preempt_hang(execlists))
+		return;
+
 	execlists_cancel_port_requests(execlists);
 	execlists_unwind_incomplete_requests(execlists);

 	wait_for_guc_preempt_report(engine);
 	intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
-
-	execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
 }

 /**

@@ -695,9 +696,6 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)

 	lockdep_assert_held(&engine->timeline.lock);

-	rb = execlists->first;
-	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-
 	if (port_isset(port)) {
 		if (intel_engine_has_preemption(engine)) {
 			struct guc_preempt_work *preempt_work =

@@ -719,7 +717,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 	}
 	GEM_BUG_ON(port_isset(port));

-	while (rb) {
+	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;

@@ -744,15 +742,13 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 			submit = true;
 		}

-		rb = rb_next(rb);
-		rb_erase(&p->node, &execlists->queue);
+		rb_erase_cached(&p->node, &execlists->queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
 done:
 	execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
-	execlists->first = rb;
 	if (submit)
 		port_assign(port, last);
 	if (last)

@@ -761,7 +757,8 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 	/* We must always keep the beast fed if we have work piled up */
 	GEM_BUG_ON(port_isset(execlists->port) &&
 		   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
-	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+	GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
+		   !port_isset(execlists->port));

 	return submit;
 }

@@ -914,8 +911,12 @@ static void guc_clients_doorbell_fini(struct intel_guc *guc)
 		__update_doorbell_desc(guc->preempt_client,
 				       GUC_DOORBELL_INVALID);
 	}
-	__destroy_doorbell(guc->execbuf_client);
-	__update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
+
+	if (guc->execbuf_client) {
+		__destroy_doorbell(guc->execbuf_client);
+		__update_doorbell_desc(guc->execbuf_client,
+				       GUC_DOORBELL_INVALID);
+	}
 }

 /**

@@ -1128,7 +1129,8 @@ static void guc_clients_destroy(struct intel_guc *guc)
 	guc_client_free(client);

 	client = fetch_and_zero(&guc->execbuf_client);
-	guc_client_free(client);
+	if (client)
+		guc_client_free(client);
 }

 /*

@@ -1183,7 +1185,8 @@ void intel_guc_submission_fini(struct intel_guc *guc)
 	guc_clients_destroy(guc);
 	WARN_ON(!guc_verify_doorbells(guc));

-	guc_stage_desc_pool_destroy(guc);
+	if (guc->stage_desc_pool)
+		guc_stage_desc_pool_destroy(guc);
 }

 static void guc_interrupts_capture(struct drm_i915_private *dev_priv)

@@ -1266,6 +1269,31 @@ static void guc_submission_unpark(struct intel_engine_cs *engine)
 	intel_engine_pin_breadcrumbs_irq(engine);
 }

+static void guc_set_default_submission(struct intel_engine_cs *engine)
+{
+	/*
+	 * We inherit a bunch of functions from execlists that we'd like
+	 * to keep using:
+	 *
+	 *    engine->submit_request = execlists_submit_request;
+	 *    engine->cancel_requests = execlists_cancel_requests;
+	 *    engine->schedule = execlists_schedule;
+	 *
+	 * But we need to override the actual submission backend in order
+	 * to talk to the GuC.
+	 */
+	intel_execlists_set_default_submission(engine);
+
+	engine->execlists.tasklet.func = guc_submission_tasklet;
+
+	engine->park = guc_submission_park;
+	engine->unpark = guc_submission_unpark;
+
+	engine->reset.prepare = guc_reset_prepare;
+
+	engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+}
+
 int intel_guc_submission_enable(struct intel_guc *guc)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);

@@ -1304,17 +1332,8 @@ int intel_guc_submission_enable(struct intel_guc *guc)
 	guc_interrupts_capture(dev_priv);

 	for_each_engine(engine, dev_priv, id) {
-		struct intel_engine_execlists * const execlists =
-			&engine->execlists;
-
-		execlists->tasklet.func = guc_submission_tasklet;
-
-		engine->reset.prepare = guc_reset_prepare;
-
-		engine->park = guc_submission_park;
-		engine->unpark = guc_submission_unpark;
-
-		engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
+		engine->set_default_submission = guc_set_default_submission;
+		engine->set_default_submission(engine);
 	}

 	return 0;

@@ -1328,9 +1347,6 @@ void intel_guc_submission_disable(struct intel_guc *guc)

 	guc_interrupts_release(dev_priv);
 	guc_clients_doorbell_fini(guc);
-
-	/* Revert back to manual ELSP submission */
-	intel_engines_reset_default_submission(dev_priv);
 }

 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
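The refactor above replaces open-coded hook assignment with a stored setter: the GuC backend first installs the common execlists defaults, then overrides just the hooks it needs, and keeping the setter in engine->set_default_submission lets reset paths reapply it later. A minimal runnable model of that method-table pattern (all names invented for illustration):

#include <stdio.h>

struct engine {
        void (*submit)(struct engine *);
        void (*set_default_submission)(struct engine *);
};

static void execlists_submit(struct engine *e) { (void)e; puts("execlists submit"); }
static void guc_submit(struct engine *e)       { (void)e; puts("guc submit"); }

static void execlists_set_default_submission(struct engine *e)
{
        e->submit = execlists_submit;           /* common baseline */
}

static void guc_set_default_submission(struct engine *e)
{
        execlists_set_default_submission(e);    /* inherit the defaults... */
        e->submit = guc_submit;                 /* ...then override the backend */
}

int main(void)
{
        struct engine e;

        e.set_default_submission = guc_set_default_submission;
        e.set_default_submission(&e);   /* as in intel_guc_submission_enable() */
        e.submit(&e);
        return 0;
}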
@@ -92,6 +92,9 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 {
 	int ret;

+	if (i915_inject_load_failure())
+		return -ENODEV;
+
 	if (!i915_modparams.enable_gvt) {
 		DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
 		return 0;
@@ -76,37 +76,6 @@
  * it will use i915_hotplug_work_func where this logic is handled.
  */

-/**
- * intel_hpd_pin_to_port - return port hard associated with certain pin.
- * @dev_priv: private driver data pointer
- * @pin: the hpd pin to get associated port
- *
- * Return port that is associated with @pin and PORT_NONE if no port is
- * hard associated with that @pin.
- */
-enum port intel_hpd_pin_to_port(struct drm_i915_private *dev_priv,
-				enum hpd_pin pin)
-{
-	switch (pin) {
-	case HPD_PORT_A:
-		return PORT_A;
-	case HPD_PORT_B:
-		return PORT_B;
-	case HPD_PORT_C:
-		return PORT_C;
-	case HPD_PORT_D:
-		return PORT_D;
-	case HPD_PORT_E:
-		if (IS_CNL_WITH_PORT_F(dev_priv))
-			return PORT_F;
-		return PORT_E;
-	case HPD_PORT_F:
-		return PORT_F;
-	default:
-		return PORT_NONE; /* no port for this pin */
-	}
-}
-
 /**
  * intel_hpd_pin_default - return default pin associated with certain port.
  * @dev_priv: private driver data pointer

@@ -241,25 +210,25 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
 		container_of(work, typeof(*dev_priv),
 			     hotplug.reenable_work.work);
 	struct drm_device *dev = &dev_priv->drm;
-	int i;
+	enum hpd_pin pin;

 	intel_runtime_pm_get(dev_priv);

 	spin_lock_irq(&dev_priv->irq_lock);
-	for_each_hpd_pin(i) {
+	for_each_hpd_pin(pin) {
 		struct drm_connector *connector;
 		struct drm_connector_list_iter conn_iter;

-		if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
+		if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
 			continue;

-		dev_priv->hotplug.stats[i].state = HPD_ENABLED;
+		dev_priv->hotplug.stats[pin].state = HPD_ENABLED;

 		drm_connector_list_iter_begin(dev, &conn_iter);
 		drm_for_each_connector_iter(connector, &conn_iter) {
 			struct intel_connector *intel_connector = to_intel_connector(connector);

-			if (intel_connector->encoder->hpd_pin == i) {
+			if (intel_connector->encoder->hpd_pin == pin) {
 				if (connector->polled != intel_connector->polled)
 					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
 							 connector->name);

@@ -301,13 +270,18 @@ bool intel_encoder_hotplug(struct intel_encoder *encoder,
 	return true;
 }

+static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder)
+{
+	return intel_encoder_is_dig_port(encoder) &&
+		enc_to_dig_port(&encoder->base)->hpd_pulse != NULL;
+}
+
 static void i915_digport_work_func(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, struct drm_i915_private, hotplug.dig_port_work);
 	u32 long_port_mask, short_port_mask;
-	struct intel_digital_port *intel_dig_port;
-	int i;
+	struct intel_encoder *encoder;
 	u32 old_bits = 0;

 	spin_lock_irq(&dev_priv->irq_lock);

@@ -317,27 +291,27 @@ static void i915_digport_work_func(struct work_struct *work)
 	dev_priv->hotplug.short_port_mask = 0;
 	spin_unlock_irq(&dev_priv->irq_lock);

-	for (i = 0; i < I915_MAX_PORTS; i++) {
-		bool valid = false;
-		bool long_hpd = false;
-		intel_dig_port = dev_priv->hotplug.irq_port[i];
-		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
+	for_each_intel_encoder(&dev_priv->drm, encoder) {
+		struct intel_digital_port *dig_port;
+		enum port port = encoder->port;
+		bool long_hpd, short_hpd;
+		enum irqreturn ret;
+
+		if (!intel_encoder_has_hpd_pulse(encoder))
 			continue;

-		if (long_port_mask & (1 << i)) {
-			valid = true;
-			long_hpd = true;
-		} else if (short_port_mask & (1 << i))
-			valid = true;
+		long_hpd = long_port_mask & BIT(port);
+		short_hpd = short_port_mask & BIT(port);

-		if (valid) {
-			enum irqreturn ret;
+		if (!long_hpd && !short_hpd)
+			continue;

-			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
-			if (ret == IRQ_NONE) {
-				/* fall back to old school hpd */
-				old_bits |= (1 << intel_dig_port->base.hpd_pin);
-			}
-		}
+		dig_port = enc_to_dig_port(&encoder->base);
+
+		ret = dig_port->hpd_pulse(dig_port, long_hpd);
+		if (ret == IRQ_NONE) {
+			/* fall back to old school hpd */
+			old_bits |= BIT(encoder->hpd_pin);
+		}
 	}

@@ -418,26 +392,24 @@ static void i915_hotplug_work_func(struct work_struct *work)
 void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 			   u32 pin_mask, u32 long_mask)
 {
-	int i;
-	enum port port;
+	struct intel_encoder *encoder;
 	bool storm_detected = false;
 	bool queue_dig = false, queue_hp = false;
-	bool is_dig_port;

 	if (!pin_mask)
 		return;

 	spin_lock(&dev_priv->irq_lock);
-	for_each_hpd_pin(i) {
-		if (!(BIT(i) & pin_mask))
+	for_each_intel_encoder(&dev_priv->drm, encoder) {
+		enum hpd_pin pin = encoder->hpd_pin;
+		bool has_hpd_pulse = intel_encoder_has_hpd_pulse(encoder);
+
+		if (!(BIT(pin) & pin_mask))
 			continue;

-		port = intel_hpd_pin_to_port(dev_priv, i);
-		is_dig_port = port != PORT_NONE &&
-			dev_priv->hotplug.irq_port[port];
-
-		if (is_dig_port) {
-			bool long_hpd = long_mask & BIT(i);
+		if (has_hpd_pulse) {
+			bool long_hpd = long_mask & BIT(pin);
+			enum port port = encoder->port;

 			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port),
 					 long_hpd ? "long" : "short");

@@ -455,7 +427,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 			}
 		}

-		if (dev_priv->hotplug.stats[i].state == HPD_DISABLED) {
+		if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED) {
 			/*
 			 * On GMCH platforms the interrupt mask bits only
 			 * prevent irq generation, not the setting of the

@@ -463,20 +435,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
 			 * interrupts on saner platforms.
 			 */
 			WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
-				  "Received HPD interrupt on pin %d although disabled\n", i);
+				  "Received HPD interrupt on pin %d although disabled\n", pin);
 			continue;
 		}

-		if (dev_priv->hotplug.stats[i].state != HPD_ENABLED)
+		if (dev_priv->hotplug.stats[pin].state != HPD_ENABLED)
 			continue;

-		if (!is_dig_port) {
-			dev_priv->hotplug.event_bits |= BIT(i);
+		if (!has_hpd_pulse) {
+			dev_priv->hotplug.event_bits |= BIT(pin);
 			queue_hp = true;
 		}

-		if (intel_hpd_irq_storm_detect(dev_priv, i)) {
-			dev_priv->hotplug.event_bits &= ~BIT(i);
+		if (intel_hpd_irq_storm_detect(dev_priv, pin)) {
+			dev_priv->hotplug.event_bits &= ~BIT(pin);
 			storm_detected = true;
 		}
 	}
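Both hotplug work functions above dispatch on per-port bitmasks: one mask for long pulses, one for short, with BIT(port) selecting the lane for each encoder. A compact runnable illustration of that two-mask dispatch (port names and numbering here are illustrative, not the driver's enum):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

enum port { PORT_A, PORT_B, PORT_C, NUM_PORTS };

int main(void)
{
        uint32_t long_port_mask = BIT(PORT_B);
        uint32_t short_port_mask = BIT(PORT_C);

        for (enum port p = PORT_A; p < NUM_PORTS; p++) {
                int long_hpd = !!(long_port_mask & BIT(p));
                int short_hpd = !!(short_port_mask & BIT(p));

                if (!long_hpd && !short_hpd)
                        continue;       /* nothing pending on this port */
                printf("port %c: %s pulse\n", 'A' + p,
                       long_hpd ? "long" : "short");
        }
        return 0;
}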
@@ -361,15 +361,39 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
 	return ret;
 }

+static inline
+unsigned int gmbus_max_xfer_size(struct drm_i915_private *dev_priv)
+{
+	return INTEL_GEN(dev_priv) >= 9 ? GEN9_GMBUS_BYTE_COUNT_MAX :
+	       GMBUS_BYTE_COUNT_MAX;
+}
+
 static int
 gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
 		      unsigned short addr, u8 *buf, unsigned int len,
-		      u32 gmbus1_index)
+		      u32 gmbus0_reg, u32 gmbus1_index)
 {
+	unsigned int size = len;
+	bool burst_read = len > gmbus_max_xfer_size(dev_priv);
+	bool extra_byte_added = false;
+
+	if (burst_read) {
+		/*
+		 * As per HW Spec, for 512 bytes we need to read an extra
+		 * byte and ignore it.
+		 */
+		if (len == 512) {
+			extra_byte_added = true;
+			len++;
+		}
+		size = len % 256 + 256;
+		I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
+	}
+
 	I915_WRITE_FW(GMBUS1,
 		      gmbus1_index |
 		      GMBUS_CYCLE_WAIT |
-		      (len << GMBUS_BYTE_COUNT_SHIFT) |
+		      (size << GMBUS_BYTE_COUNT_SHIFT) |
 		      (addr << GMBUS_SLAVE_ADDR_SHIFT) |
 		      GMBUS_SLAVE_READ | GMBUS_SW_RDY);
 	while (len) {

@@ -382,17 +406,34 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,

 		val = I915_READ_FW(GMBUS3);
 		do {
+			if (extra_byte_added && len == 1)
+				break;
+
 			*buf++ = val & 0xff;
 			val >>= 8;
 		} while (--len && ++loop < 4);
+
+		if (burst_read && len == size - 4)
+			/* Reset the override bit */
+			I915_WRITE_FW(GMBUS0, gmbus0_reg);
 	}

 	return 0;
 }

+/*
+ * HW spec says that 512 bytes in burst read need special treatment.
+ * But it doesn't talk about other multiples of 256 bytes. And we couldn't
+ * locate an I2C slave that supports such a lengthy burst read for
+ * experiments.
+ *
+ * So until things get clarified on HW support, to avoid burst read lengths
+ * that are a multiple of 256 bytes other than 512, the max burst read
+ * length is fixed at 767 bytes.
+ */
+#define INTEL_GMBUS_BURST_READ_MAX_LEN		767U
+
 static int
 gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
-		u32 gmbus1_index)
+		u32 gmbus0_reg, u32 gmbus1_index)
 {
 	u8 *buf = msg->buf;
 	unsigned int rx_size = msg->len;

@@ -400,10 +441,13 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
 	int ret;

 	do {
-		len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+		if (HAS_GMBUS_BURST_READ(dev_priv))
+			len = min(rx_size, INTEL_GMBUS_BURST_READ_MAX_LEN);
+		else
+			len = min(rx_size, gmbus_max_xfer_size(dev_priv));

-		ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
-					    buf, len, gmbus1_index);
+		ret = gmbus_xfer_read_chunk(dev_priv, msg->addr, buf, len,
+					    gmbus0_reg, gmbus1_index);
 		if (ret)
 			return ret;

@@ -462,7 +506,7 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
 	int ret;

 	do {
-		len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+		len = min(tx_size, gmbus_max_xfer_size(dev_priv));

 		ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len,
 					     gmbus1_index);

@@ -491,7 +535,8 @@ gmbus_is_index_xfer(struct i2c_msg *msgs, int i, int num)
 }

 static int
-gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
+		 u32 gmbus0_reg)
 {
 	u32 gmbus1_index = 0;
 	u32 gmbus5 = 0;

@@ -509,7 +554,8 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
 	I915_WRITE_FW(GMBUS5, gmbus5);

 	if (msgs[1].flags & I2C_M_RD)
-		ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+		ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
+				      gmbus1_index);
 	else
 		ret = gmbus_xfer_write(dev_priv, &msgs[1], gmbus1_index);

@@ -544,10 +590,12 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
 	for (; i < num; i += inc) {
 		inc = 1;
 		if (gmbus_is_index_xfer(msgs, i, num)) {
-			ret = gmbus_index_xfer(dev_priv, &msgs[i]);
+			ret = gmbus_index_xfer(dev_priv, &msgs[i],
+					       gmbus0_source | bus->reg0);
 			inc = 2; /* an index transmission is two msgs */
 		} else if (msgs[i].flags & I2C_M_RD) {
-			ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+			ret = gmbus_xfer_read(dev_priv, &msgs[i],
+					      gmbus0_source | bus->reg0, 0);
 		} else {
 			ret = gmbus_xfer_write(dev_priv, &msgs[i], 0);
 		}
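The burst-read arithmetic above is easy to misread: when the override bit is set, the byte-count field is programmed with len % 256 + 256 rather than len itself, and exactly 512 bytes gets one padding byte appended (read and then discarded). A short worked example showing how the programmed field value falls out for a few lengths, including the driver's 767-byte cap (this only reproduces the arithmetic in the hunk, not the hardware semantics):

#include <stdio.h>

int main(void)
{
        unsigned int lens[] = { 300, 511, 512, 767 };

        for (int i = 0; i < 4; i++) {
                unsigned int len = lens[i];
                int extra = (len == 512);   /* pad the 512-byte special case */

                if (extra)
                        len++;
                printf("len %u -> programmed size %u%s\n",
                       lens[i], len % 256 + 256,
                       extra ? " (one extra byte discarded)" : "");
        }
        return 0;
}

For 256 <= len < 512 the expression is the identity (e.g. 300 -> 300), while 512 becomes 257 with the pad byte and 767 becomes 511, which is why the cap avoids other exact multiples of 256.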
@@ -273,7 +273,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
 find_priolist:
 	/* most positive priority is scheduled first, equal priorities fifo */
 	rb = NULL;
-	parent = &execlists->queue.rb_node;
+	parent = &execlists->queue.rb_root.rb_node;
 	while (*parent) {
 		rb = *parent;
 		p = to_priolist(rb);

@@ -311,10 +311,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
 	p->priority = prio;
 	INIT_LIST_HEAD(&p->requests);
 	rb_link_node(&p->node, rb, parent);
-	rb_insert_color(&p->node, &execlists->queue);
-
-	if (first)
-		execlists->first = &p->node;
+	rb_insert_color_cached(&p->node, &execlists->queue, first);

 	return p;
 }

@@ -455,6 +452,16 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 	struct execlist_port *port = execlists->port;
 	unsigned int n;

+	/*
+	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
+	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
+	 * not be relinquished until the device is idle (see
+	 * i915_gem_idle_work_handler()). As a precaution, we make sure
+	 * that all ELSP are drained i.e. we have processed the CSB,
+	 * before allowing ourselves to idle and calling intel_runtime_pm_put().
+	 */
+	GEM_BUG_ON(!engine->i915->gt.awake);
+
 	/*
 	 * ELSQ note: the submit queue is not cleared after being submitted
 	 * to the HW so we need to make sure we always clean it up. This is

@@ -562,12 +569,13 @@ static void complete_preempt_context(struct intel_engine_execlists *execlists)
 {
 	GEM_BUG_ON(!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT));

+	if (inject_preempt_hang(execlists))
+		return;
+
 	execlists_cancel_port_requests(execlists);
 	__unwind_incomplete_requests(container_of(execlists,
 						  struct intel_engine_cs,
 						  execlists));
-
-	execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
 }

 static void execlists_dequeue(struct intel_engine_cs *engine)

@@ -602,9 +610,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */

-	rb = execlists->first;
-	GEM_BUG_ON(rb_first(&execlists->queue) != rb);
-
 	if (last) {
 		/*
 		 * Don't resubmit or switch until all outstanding

@@ -666,7 +671,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			last->tail = last->wa_tail;
 	}

-	while (rb) {
+	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;

@@ -725,8 +730,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			submit = true;
 		}

-		rb = rb_next(rb);
-		rb_erase(&p->node, &execlists->queue);
+		rb_erase_cached(&p->node, &execlists->queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);

@@ -752,14 +756,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	execlists->queue_priority =
 		port != execlists->port ? rq_prio(last) : INT_MIN;

-	execlists->first = rb;
 	if (submit) {
 		port_assign(port, last);
 		execlists_submit_ports(engine);
 	}

 	/* We must always keep the beast fed if we have work piled up */
-	GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
+	GEM_BUG_ON(rb_first_cached(&execlists->queue) &&
+		   !port_isset(execlists->port));

 	/* Re-evaluate the executing context setup after each preemptive kick */
 	if (last)

@@ -799,73 +803,7 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
 		port++;
 	}

-	execlists_user_end(execlists);
-}
-
-static void clear_gtiir(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	int i;
-
-	/*
-	 * Clear any pending interrupt state.
-	 *
-	 * We do it twice out of paranoia that some of the IIR are
-	 * double buffered, and so if we only reset it once there may
-	 * still be an interrupt pending.
-	 */
-	if (INTEL_GEN(dev_priv) >= 11) {
-		static const struct {
-			u8 bank;
-			u8 bit;
-		} gen11_gtiir[] = {
-			[RCS] = {0, GEN11_RCS0},
-			[BCS] = {0, GEN11_BCS},
-			[_VCS(0)] = {1, GEN11_VCS(0)},
-			[_VCS(1)] = {1, GEN11_VCS(1)},
-			[_VCS(2)] = {1, GEN11_VCS(2)},
-			[_VCS(3)] = {1, GEN11_VCS(3)},
-			[_VECS(0)] = {1, GEN11_VECS(0)},
-			[_VECS(1)] = {1, GEN11_VECS(1)},
-		};
-		unsigned long irqflags;
-
-		GEM_BUG_ON(engine->id >= ARRAY_SIZE(gen11_gtiir));
-
-		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
-		for (i = 0; i < 2; i++) {
-			gen11_reset_one_iir(dev_priv,
-					    gen11_gtiir[engine->id].bank,
-					    gen11_gtiir[engine->id].bit);
-		}
-		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-	} else {
-		static const u8 gtiir[] = {
-			[RCS] = 0,
-			[BCS] = 0,
-			[VCS] = 1,
-			[VCS2] = 1,
-			[VECS] = 3,
-		};
-
-		GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
-
-		for (i = 0; i < 2; i++) {
-			I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
-				   engine->irq_keep_mask);
-			POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
-		}
-		GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
-			   engine->irq_keep_mask);
-	}
-}
-
-static void reset_irq(struct intel_engine_cs *engine)
-{
-	/* Mark all CS interrupts as complete */
-	smp_store_mb(engine->execlists.active, 0);
-
-	clear_gtiir(engine);
+	execlists_clear_all_active(execlists);
 }

 static void reset_csb_pointers(struct intel_engine_execlists *execlists)

@@ -883,6 +821,11 @@ static void reset_csb_pointers(struct intel_engine_execlists *execlists)
 	WRITE_ONCE(*execlists->csb_write, execlists->csb_write_reset);
 }

+static void nop_submission_tasklet(unsigned long data)
+{
+	/* The driver is wedged; don't process any more events. */
+}
+
 static void execlists_cancel_requests(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;

@@ -911,7 +854,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)

 	/* Cancel the requests on the HW and clear the ELSP tracker. */
 	execlists_cancel_port_requests(execlists);
-	reset_irq(engine);
+	execlists_user_end(execlists);

 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->timeline.requests, link) {

@@ -921,8 +864,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	}

 	/* Flush the queued requests to the timeline list (for retiring). */
-	rb = execlists->first;
-	while (rb) {
+	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);

 		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {

@@ -932,8 +874,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 			__i915_request_submit(rq);
 		}

-		rb = rb_next(rb);
-		rb_erase(&p->node, &execlists->queue);
+		rb_erase_cached(&p->node, &execlists->queue);
 		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);

@@ -942,10 +883,12 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	/* Remaining _unready_ requests will be nop'ed when submitted */

 	execlists->queue_priority = INT_MIN;
-	execlists->queue = RB_ROOT;
-	execlists->first = NULL;
+	execlists->queue = RB_ROOT_CACHED;
 	GEM_BUG_ON(port_isset(execlists->port));

+	GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+	execlists->tasklet.func = nop_submission_tasklet;
+
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }

@@ -1110,16 +1053,6 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 {
 	lockdep_assert_held(&engine->timeline.lock);

-	/*
-	 * We can skip acquiring intel_runtime_pm_get() here as it was taken
-	 * on our behalf by the request (see i915_gem_mark_busy()) and it will
-	 * not be relinquished until the device is idle (see
-	 * i915_gem_idle_work_handler()). As a precaution, we make sure
-	 * that all ELSP are drained i.e. we have processed the CSB,
-	 * before allowing ourselves to idle and calling intel_runtime_pm_put().
-	 */
-	GEM_BUG_ON(!engine->i915->gt.awake);
-
 	process_csb(engine);
 	if (!execlists_is_active(&engine->execlists, EXECLISTS_ACTIVE_PREEMPT))
 		execlists_dequeue(engine);

@@ -1140,10 +1073,7 @@ static void execlists_submission_tasklet(unsigned long data)
 		  engine->execlists.active);

 	spin_lock_irqsave(&engine->timeline.lock, flags);
-
-	if (engine->i915->gt.awake) /* we may be delayed until after we idle! */
-		__execlists_submission_tasklet(engine);
-
+	__execlists_submission_tasklet(engine);
 	spin_unlock_irqrestore(&engine->timeline.lock, flags);
 }

@@ -1191,7 +1121,7 @@ static void execlists_submit_request(struct i915_request *request)

 	queue_request(engine, &request->sched, rq_prio(request));

-	GEM_BUG_ON(!engine->execlists.first);
+	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 	GEM_BUG_ON(list_empty(&request->sched.link));

 	submit_queue(engine, rq_prio(request));

@@ -1852,7 +1782,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
 		return ret;

 	intel_engine_reset_breadcrumbs(engine);
-	intel_engine_init_hangcheck(engine);

 	if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
 		struct drm_printer p = drm_debug_printer(__func__);

@@ -1986,7 +1915,6 @@ static void execlists_reset(struct intel_engine_cs *engine,
 	 * requests were completed.
 	 */
 	execlists_cancel_port_requests(execlists);
-	reset_irq(engine);

 	/* Push back any incomplete requests for replay after the reset. */
 	__unwind_incomplete_requests(engine);

@@ -2044,7 +1972,7 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
 	struct intel_engine_execlists * const execlists = &engine->execlists;

 	/* After a GPU reset, we may have requests to replay */
-	if (execlists->first)
+	if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
 		tasklet_schedule(&execlists->tasklet);

 	/*

@@ -2364,7 +2292,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	kfree(engine);
 }

-static void execlists_set_default_submission(struct intel_engine_cs *engine)
+void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
 {
 	engine->submit_request = execlists_submit_request;
 	engine->cancel_requests = execlists_cancel_requests;

@@ -2404,7 +2332,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->emit_breadcrumb = gen8_emit_breadcrumb;
 	engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;

-	engine->set_default_submission = execlists_set_default_submission;
+	engine->set_default_submission = intel_execlists_set_default_submission;

 	if (INTEL_GEN(engine->i915) < 11) {
 		engine->irq_enable = gen8_logical_ring_enable_irq;
@@ -104,4 +104,6 @@ struct i915_gem_context;

 void intel_lr_context_resume(struct drm_i915_private *dev_priv);

+void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
+
 #endif /* _INTEL_LRC_H_ */
@@ -44,8 +44,6 @@
 /* Private structure for the integrated LVDS support */
 struct intel_lvds_connector {
 	struct intel_connector base;
-
-	struct notifier_block lid_notifier;
 };
 
 struct intel_lvds_pps {

@@ -452,26 +450,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
 	return true;
 }
 
-/*
- * Detect the LVDS connection.
- *
- * Since LVDS doesn't have hotplug, we use the lid as a proxy. Open means
- * connected and closed means disconnected. We also send hotplug events as
- * needed, using lid status notification from the input layer.
- */
 static enum drm_connector_status
 intel_lvds_detect(struct drm_connector *connector, bool force)
 {
-	struct drm_i915_private *dev_priv = to_i915(connector->dev);
-	enum drm_connector_status status;
-
-	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
-		      connector->base.id, connector->name);
-
-	status = intel_panel_detect(dev_priv);
-	if (status != connector_status_unknown)
-		return status;
-
 	return connector_status_connected;
 }
 
@@ -496,117 +477,6 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
 	return 1;
 }
 
-static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
-{
-	DRM_INFO("Skipping forced modeset for %s\n", id->ident);
-	return 1;
-}
-
-/* The GPU hangs up on these systems if modeset is performed on LID open */
-static const struct dmi_system_id intel_no_modeset_on_lid[] = {
-	{
-		.callback = intel_no_modeset_on_lid_dmi_callback,
-		.ident = "Toshiba Tecra A11",
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A11"),
-		},
-	},
-
-	{ }	/* terminating entry */
-};
-
-/*
- * Lid events. Note the use of 'modeset':
- * - we set it to MODESET_ON_LID_OPEN on lid close,
- *   and set it to MODESET_DONE on open
- * - we use it as a "only once" bit (ie we ignore
- *   duplicate events where it was already properly set)
- * - the suspend/resume paths will set it to
- *   MODESET_SUSPENDED and ignore the lid open event,
- *   because they restore the mode ("lid open").
- */
-static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
-			    void *unused)
-{
-	struct intel_lvds_connector *lvds_connector =
-		container_of(nb, struct intel_lvds_connector, lid_notifier);
-	struct drm_connector *connector = &lvds_connector->base.base;
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
-	if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
-		return NOTIFY_OK;
-
-	mutex_lock(&dev_priv->modeset_restore_lock);
-	if (dev_priv->modeset_restore == MODESET_SUSPENDED)
-		goto exit;
-	/*
-	 * check and update the status of LVDS connector after receiving
-	 * the LID notification event.
-	 */
-	connector->status = connector->funcs->detect(connector, false);
-
-	/* Don't force modeset on machines where it causes a GPU lockup */
-	if (dmi_check_system(intel_no_modeset_on_lid))
-		goto exit;
-	if (!acpi_lid_open()) {
-		/* do modeset on next lid open event */
-		dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
-		goto exit;
-	}
-
-	if (dev_priv->modeset_restore == MODESET_DONE)
-		goto exit;
-
-	/*
-	 * Some old platforms' BIOS love to wreak havoc while the lid is closed.
-	 * We try to detect this here and undo any damage. The split for PCH
-	 * platforms is rather conservative and a bit arbitrary, except that on
-	 * those platforms VGA disabling requires actual legacy VGA I/O access,
-	 * and as part of the cleanup in the hw state restore we also redisable
-	 * the vga plane.
-	 */
-	if (!HAS_PCH_SPLIT(dev_priv))
-		intel_display_resume(dev);
-
-	dev_priv->modeset_restore = MODESET_DONE;
-
-exit:
-	mutex_unlock(&dev_priv->modeset_restore_lock);
-	return NOTIFY_OK;
-}
-
-static int
-intel_lvds_connector_register(struct drm_connector *connector)
-{
-	struct intel_lvds_connector *lvds = to_lvds_connector(connector);
-	int ret;
-
-	ret = intel_connector_register(connector);
-	if (ret)
-		return ret;
-
-	lvds->lid_notifier.notifier_call = intel_lid_notify;
-	if (acpi_lid_notifier_register(&lvds->lid_notifier)) {
-		DRM_DEBUG_KMS("lid notifier registration failed\n");
-		lvds->lid_notifier.notifier_call = NULL;
-	}
-
-	return 0;
-}
-
-static void
-intel_lvds_connector_unregister(struct drm_connector *connector)
-{
-	struct intel_lvds_connector *lvds = to_lvds_connector(connector);
-
-	if (lvds->lid_notifier.notifier_call)
-		acpi_lid_notifier_unregister(&lvds->lid_notifier);
-
-	intel_connector_unregister(connector);
-}
-
 /**
  * intel_lvds_destroy - unregister and free LVDS structures
  * @connector: connector to free

@@ -639,8 +509,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.atomic_get_property = intel_digital_connector_atomic_get_property,
 	.atomic_set_property = intel_digital_connector_atomic_set_property,
-	.late_register = intel_lvds_connector_register,
-	.early_unregister = intel_lvds_connector_unregister,
+	.late_register = intel_connector_register,
+	.early_unregister = intel_connector_unregister,
 	.destroy = intel_lvds_destroy,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 	.atomic_duplicate_state = intel_digital_connector_duplicate_state,

@@ -1114,8 +984,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
 	 * 2) check for VBT data
 	 * 3) check to see if LVDS is already on
 	 *    if none of the above, no panel
-	 * 4) make sure lid is open
-	 *    if closed, act like it's not there for now
 	 */
 
 	/*
@@ -375,26 +375,6 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
 	pipe_config->gmch_pfit.lvds_border_bits = border;
 }
 
-enum drm_connector_status
-intel_panel_detect(struct drm_i915_private *dev_priv)
-{
-	/* Assume that the BIOS does not lie through the OpRegion... */
-	if (!i915_modparams.panel_ignore_lid && dev_priv->opregion.lid_state) {
-		return *dev_priv->opregion.lid_state & 0x1 ?
-			connector_status_connected :
-			connector_status_disconnected;
-	}
-
-	switch (i915_modparams.panel_ignore_lid) {
-	case -2:
-		return connector_status_connected;
-	case -1:
-		return connector_status_disconnected;
-	default:
-		return connector_status_unknown;
-	}
-}
-
 /**
  * scale - scale values from one range to another
  * @source_val: value in range [@source_min..@source_max]
@@ -7347,11 +7347,11 @@ static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
 
 static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
 {
-	if (WARN_ON(!dev_priv->vlv_pctx))
-		return;
+	struct drm_i915_gem_object *pctx;
 
-	i915_gem_object_put(dev_priv->vlv_pctx);
-	dev_priv->vlv_pctx = NULL;
+	pctx = fetch_and_zero(&dev_priv->vlv_pctx);
+	if (pctx)
+		i915_gem_object_put(pctx);
 }
 
 static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
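A note on the idiom above: the rewrite leans on i915's fetch_and_zero() helper, which reads a pointer and clears it in one expression, so the WARN_ON/early-return dance collapses into a single swap-then-check. A minimal sketch of the idea (illustrative only, not the kernel's exact definition; it is not atomic, so callers still rely on external locking):

	#define my_fetch_and_zero(ptr) ({		\
		typeof(*(ptr)) __T = *(ptr);		\
		*(ptr) = (typeof(*(ptr)))0;		\
		__T;					\
	})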
@@ -717,11 +717,16 @@ void intel_psr_disable(struct intel_dp *intel_dp,
 	cancel_work_sync(&dev_priv->psr.work);
 }
 
-int intel_psr_wait_for_idle(struct drm_i915_private *dev_priv)
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state)
 {
+	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
 	i915_reg_t reg;
 	u32 mask;
 
+	if (!new_crtc_state->has_psr)
+		return 0;
+
 	/*
 	 * The sole user right now is intel_pipe_update_start(),
 	 * which won't race with psr_enable/disable, which is

@@ -966,16 +971,6 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
 	/* For new platforms let's respect VBT back again */
 	dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
 
-	/* Override link_standby x link_off defaults */
-	if (i915_modparams.enable_psr == 2 && !dev_priv->psr.link_standby) {
-		DRM_DEBUG_KMS("PSR: Forcing link standby\n");
-		dev_priv->psr.link_standby = true;
-	}
-	if (i915_modparams.enable_psr == 3 && dev_priv->psr.link_standby) {
-		DRM_DEBUG_KMS("PSR: Forcing main link off\n");
-		dev_priv->psr.link_standby = false;
-	}
-
 	INIT_WORK(&dev_priv->psr.work, intel_psr_work);
 	mutex_init(&dev_priv->psr.lock);
 }
@@ -524,8 +524,6 @@ static int init_ring_common(struct intel_engine_cs *engine)
 		goto out;
 	}
 
-	intel_engine_init_hangcheck(engine);
-
 	if (INTEL_GEN(dev_priv) > 2)
 		I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 
@@ -1089,6 +1087,7 @@ void intel_ring_unpin(struct intel_ring *ring)
 static struct i915_vma *
 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 {
+	struct i915_address_space *vm = &dev_priv->ggtt.vm;
 	struct drm_i915_gem_object *obj;
 	struct i915_vma *vma;
 
@@ -1098,10 +1097,14 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 	if (IS_ERR(obj))
 		return ERR_CAST(obj);
 
-	/* mark ring buffers as read-only from GPU side by default */
-	obj->gt_ro = 1;
+	/*
+	 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
+	 * if supported by the platform's GGTT.
+	 */
+	if (vm->has_read_only)
+		i915_gem_object_set_readonly(obj);
 
-	vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
+	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma))
 		goto err;
 
@@ -193,6 +193,11 @@ struct i915_priolist {
 	int priority;
 };
 
+struct st_preempt_hang {
+	struct completion completion;
+	bool inject_hang;
+};
+
 /**
  * struct intel_engine_execlists - execlist submission queue and port state
 *

@@ -292,12 +297,7 @@ struct intel_engine_execlists {
 	/**
 	 * @queue: queue of requests, in priority lists
 	 */
-	struct rb_root queue;
-
-	/**
-	 * @first: leftmost level in priority @queue
-	 */
-	struct rb_node *first;
+	struct rb_root_cached queue;
 
 	/**
 	 * @csb_read: control register for Context Switch buffer

@@ -338,6 +338,8 @@ struct intel_engine_execlists {
 	 * @csb_head: context status buffer head
 	 */
 	u8 csb_head;
+
+	I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
 };
 
 #define INTEL_ENGINE_CS_MAX_NAME 8

@@ -688,6 +690,12 @@ execlists_clear_active(struct intel_engine_execlists *execlists,
 	__clear_bit(bit, (unsigned long *)&execlists->active);
 }
 
+static inline void
+execlists_clear_all_active(struct intel_engine_execlists *execlists)
+{
+	execlists->active = 0;
+}
+
 static inline bool
 execlists_is_active(const struct intel_engine_execlists *execlists,
 		    unsigned int bit)

@@ -1154,4 +1162,24 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine);
 
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
 
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+	if (!execlists->preempt_hang.inject_hang)
+		return false;
+
+	complete(&execlists->preempt_hang.completion);
+	return true;
+}
+
+#else
+
+static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
+{
+	return false;
+}
+
+#endif
+
 #endif /* _INTEL_RINGBUFFER_H_ */
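The queue change above swaps a plain rb_root plus a hand-maintained leftmost pointer for rb_root_cached, which tracks the leftmost node internally. A minimal sketch of the kernel rbtree API assumed here:

	#include <linux/rbtree.h>

	struct rb_root_cached queue = RB_ROOT_CACHED;

	/* The cached root keeps the leftmost node itself, so the old
	 * hand-tracked @first field comes for free: */
	struct rb_node *first = rb_first_cached(&queue);

	/* Emptiness is tested on the embedded plain root, which is why
	 * the reset path checks RB_EMPTY_ROOT(&execlists->queue.rb_root). */
	bool empty = RB_EMPTY_ROOT(&queue.rb_root);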
@@ -104,7 +104,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
 	 * VBL interrupts will start the PSR exit and prevent a PSR
 	 * re-entry as well.
 	 */
-	if (CAN_PSR(dev_priv) && intel_psr_wait_for_idle(dev_priv))
+	if (intel_psr_wait_for_idle(new_crtc_state))
 		DRM_ERROR("PSR idle timed out, atomic update may fail\n");
 
 	local_irq_disable();
@@ -1347,8 +1347,7 @@ intel_tv_get_modes(struct drm_connector *connector)
 		mode_ptr = drm_mode_create(connector->dev);
 		if (!mode_ptr)
 			continue;
-		strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
-		mode_ptr->name[DRM_DISPLAY_MODE_LEN - 1] = '\0';
+		strlcpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
 
 		mode_ptr->hdisplay = hactive_s;
 		mode_ptr->hsync_start = hactive_s + 1;
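The strncpy() to strlcpy() switch above also drops the manual terminator write: strncpy() leaves the destination unterminated when the source fills it, while strlcpy() copies at most size - 1 bytes and always NUL-terminates. Before and after, side by side:

	char name[DRM_DISPLAY_MODE_LEN];

	/* before: two steps, since strncpy() may not terminate */
	strncpy(name, input->name, DRM_DISPLAY_MODE_LEN);
	name[DRM_DISPLAY_MODE_LEN - 1] = '\0';

	/* after: one step, always terminated */
	strlcpy(name, input->name, DRM_DISPLAY_MODE_LEN);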
@@ -570,6 +570,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
 		i915_vma_close(vma);
 
 		i915_gem_object_unpin_pages(obj);
+		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		i915_gem_object_put(obj);
 	}
 
@@ -597,6 +598,7 @@ static void close_object_list(struct list_head *objects,
 
 		list_del(&obj->st_link);
 		i915_gem_object_unpin_pages(obj);
+		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		i915_gem_object_put(obj);
 	}
 }

@@ -866,6 +868,7 @@ static int igt_mock_ppgtt_64K(void *arg)
 			i915_vma_close(vma);
 
 			i915_gem_object_unpin_pages(obj);
+			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 			i915_gem_object_put(obj);
 		}
 	}

@@ -1265,6 +1268,7 @@ static int igt_ppgtt_exhaust_huge(void *arg)
 		}
 
 		i915_gem_object_unpin_pages(obj);
+		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 		i915_gem_object_put(obj);
 	}
 }

@@ -1326,6 +1330,7 @@ static int igt_ppgtt_internal_huge(void *arg)
 	}
 
 	i915_gem_object_unpin_pages(obj);
+	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 	i915_gem_object_put(obj);
 }
 
@@ -1394,6 +1399,7 @@ static int igt_ppgtt_gemfs_huge(void *arg)
 	}
 
 	i915_gem_object_unpin_pages(obj);
+	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
 	i915_gem_object_put(obj);
 }
 
@@ -23,6 +23,7 @@
  */
 
 #include "../i915_selftest.h"
+#include "i915_random.h"
 #include "igt_flush_test.h"
 
 #include "mock_drm.h"

@@ -252,9 +253,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
 	}
 
 	for (; m < DW_PER_PAGE; m++) {
-		if (map[m] != 0xdeadbeef) {
+		if (map[m] != STACK_MAGIC) {
 			pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
-			       n, m, map[m], 0xdeadbeef);
+			       n, m, map[m], STACK_MAGIC);
 			err = -EINVAL;
 			goto out_unmap;
 		}

@@ -310,7 +311,7 @@ create_test_object(struct i915_gem_context *ctx,
 	if (err)
 		return ERR_PTR(err);
 
-	err = cpu_fill(obj, 0xdeadbeef);
+	err = cpu_fill(obj, STACK_MAGIC);
 	if (err) {
 		pr_err("Failed to fill object with cpu, err=%d\n",
 		       err);

@@ -432,6 +433,111 @@ static int igt_ctx_exec(void *arg)
 	return err;
 }
 
+static int igt_ctx_readonly(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj = NULL;
+	struct drm_file *file;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
+	LIST_HEAD(objects);
+	struct i915_gem_context *ctx;
+	struct i915_hw_ppgtt *ppgtt;
+	unsigned long ndwords, dw;
+	int err = -ENODEV;
+
+	/*
+	 * Create a few read-only objects (with the occasional writable object)
+	 * and try to write into these objects, checking that the GPU discards
+	 * any write to a read-only object.
+	 */
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	ctx = i915_gem_create_context(i915, file->driver_priv);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out_unlock;
+	}
+
+	ppgtt = ctx->ppgtt ?: i915->mm.aliasing_ppgtt;
+	if (!ppgtt || !ppgtt->vm.has_read_only) {
+		err = 0;
+		goto out_unlock;
+	}
+
+	ndwords = 0;
+	dw = 0;
+	while (!time_after(jiffies, end_time)) {
+		struct intel_engine_cs *engine;
+		unsigned int id;
+
+		for_each_engine(engine, i915, id) {
+			if (!intel_engine_can_store_dword(engine))
+				continue;
+
+			if (!obj) {
+				obj = create_test_object(ctx, file, &objects);
+				if (IS_ERR(obj)) {
+					err = PTR_ERR(obj);
+					goto out_unlock;
+				}
+
+				if (prandom_u32_state(&prng) & 1)
+					i915_gem_object_set_readonly(obj);
+			}
+
+			intel_runtime_pm_get(i915);
+			err = gpu_fill(obj, ctx, engine, dw);
+			intel_runtime_pm_put(i915);
+			if (err) {
+				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+				       ndwords, dw, max_dwords(obj),
+				       engine->name, ctx->hw_id,
+				       yesno(!!ctx->ppgtt), err);
+				goto out_unlock;
+			}
+
+			if (++dw == max_dwords(obj)) {
+				obj = NULL;
+				dw = 0;
+			}
+			ndwords++;
+		}
+	}
+	pr_info("Submitted %lu dwords (across %u engines)\n",
+		ndwords, INTEL_INFO(i915)->num_rings);
+
+	dw = 0;
+	list_for_each_entry(obj, &objects, st_link) {
+		unsigned int rem =
+			min_t(unsigned int, ndwords - dw, max_dwords(obj));
+		unsigned int num_writes;
+
+		num_writes = rem;
+		if (i915_gem_object_is_readonly(obj))
+			num_writes = 0;
+
+		err = cpu_check(obj, num_writes);
+		if (err)
+			break;
+
+		dw += rem;
+	}
+
+out_unlock:
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		err = -EIO;
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	mock_file_free(i915, file);
+	return err;
+}
+
 static __maybe_unused const char *
 __engine_name(struct drm_i915_private *i915, unsigned int engines)
 {

@@ -608,6 +714,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_switch_to_kernel_context),
 		SUBTEST(igt_ctx_exec),
+		SUBTEST(igt_ctx_readonly),
 	};
 	bool fake_alias = false;
 	int err;
@@ -1244,6 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915,
 				     u64 hole_start, u64 hole_end,
 				     unsigned long end_time))
 {
+	const u64 limit = totalram_pages << PAGE_SHIFT;
 	struct i915_gem_context *ctx;
 	struct i915_hw_ppgtt *ppgtt;
 	IGT_TIMEOUT(end_time);

@@ -1256,7 +1257,7 @@ static int exercise_mock(struct drm_i915_private *i915,
 	ppgtt = ctx->ppgtt;
 	GEM_BUG_ON(!ppgtt);
 
-	err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
+	err = func(i915, &ppgtt->vm, 0, min(ppgtt->vm.total, limit), end_time);
 
 	mock_context_close(ctx);
 	return err;
@@ -375,7 +375,8 @@ static int igt_partial_tiling(void *arg)
 		}
 
 		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
-		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
+		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
+		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
 			continue;
 
 		if (INTEL_GEN(i915) <= 2) {
@@ -210,6 +210,8 @@ int __i915_subtests(const char *caller,
 			return -EINTR;
 
 		pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+		GEM_TRACE("Running %s/%s\n", caller, st->name);
+
 		err = st->func(data);
 		if (err && err != -EINTR) {
 			pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef IGT_WEDGE_ME_H
+#define IGT_WEDGE_ME_H
+
+#include <linux/workqueue.h>
+
+#include "../i915_gem.h"
+
+struct drm_i915_private;
+
+struct igt_wedge_me {
+	struct delayed_work work;
+	struct drm_i915_private *i915;
+	const char *name;
+};
+
+static void __igt_wedge_me(struct work_struct *work)
+{
+	struct igt_wedge_me *w = container_of(work, typeof(*w), work.work);
+
+	pr_err("%s timed out, cancelling test.\n", w->name);
+
+	GEM_TRACE("%s timed out.\n", w->name);
+	GEM_TRACE_DUMP();
+
+	i915_gem_set_wedged(w->i915);
+}
+
+static void __igt_init_wedge(struct igt_wedge_me *w,
+			     struct drm_i915_private *i915,
+			     long timeout,
+			     const char *name)
+{
+	w->i915 = i915;
+	w->name = name;
+
+	INIT_DELAYED_WORK_ONSTACK(&w->work, __igt_wedge_me);
+	schedule_delayed_work(&w->work, timeout);
+}
+
+static void __igt_fini_wedge(struct igt_wedge_me *w)
+{
+	cancel_delayed_work_sync(&w->work);
+	destroy_delayed_work_on_stack(&w->work);
+	w->i915 = NULL;
+}
+
+#define igt_wedge_on_timeout(W, DEV, TIMEOUT)				\
+	for (__igt_init_wedge((W), (DEV), (TIMEOUT), __func__);		\
+	     (W)->i915;							\
+	     __igt_fini_wedge((W)))
+
+#endif /* IGT_WEDGE_ME_H */
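For context, igt_wedge_on_timeout() is a run-once for-loop macro: __igt_init_wedge() arms the delayed work, the guarded statement executes exactly once, and __igt_fini_wedge() disarms it; if the statement is still running when the timer fires, the GPU is declared wedged. A hedged usage sketch (do_blocking_step() is a hypothetical helper, not part of the patch):

	struct igt_wedge_me w;
	int err = 0;

	igt_wedge_on_timeout(&w, i915, HZ)	/* wedge if stuck > 1s */
		err = do_blocking_step(i915);	/* hypothetical helper */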
@@ -196,19 +196,23 @@ static int igt_guc_clients(void *args)
 	}
 
 	unreserve_doorbell(guc->execbuf_client);
-	err = guc_clients_doorbell_init(guc);
+
+	__create_doorbell(guc->execbuf_client);
+	err = __guc_allocate_doorbell(guc, guc->execbuf_client->stage_id);
 	if (err != -EIO) {
 		pr_err("unexpected (err = %d)", err);
-		goto out;
+		goto out_db;
 	}
 
 	if (!available_dbs(guc, guc->execbuf_client->priority)) {
 		pr_err("doorbell not available when it should be\n");
 		err = -EIO;
-		goto out;
+		goto out_db;
 	}
 
+out_db:
 	/* clean after test */
+	__destroy_doorbell(guc->execbuf_client);
 	err = reserve_doorbell(guc->execbuf_client);
 	if (err) {
 		pr_err("failed to reserve the doorbell back\n");
@@ -27,6 +27,7 @@
 #include "../i915_selftest.h"
 #include "i915_random.h"
 #include "igt_flush_test.h"
+#include "igt_wedge_me.h"
 
 #include "mock_context.h"
 #include "mock_drm.h"

@@ -921,7 +922,7 @@ static u32 fake_hangcheck(struct i915_request *rq, u32 mask)
 	return reset_count;
 }
 
-static int igt_wait_reset(void *arg)
+static int igt_reset_wait(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	struct i915_request *rq;

@@ -995,6 +996,170 @@ static int igt_wait_reset(void *arg)
 	return err;
 }
 
+struct evict_vma {
+	struct completion completion;
+	struct i915_vma *vma;
+};
+
+static int evict_vma(void *data)
+{
+	struct evict_vma *arg = data;
+	struct i915_address_space *vm = arg->vma->vm;
+	struct drm_i915_private *i915 = vm->i915;
+	struct drm_mm_node evict = arg->vma->node;
+	int err;
+
+	complete(&arg->completion);
+
+	mutex_lock(&i915->drm.struct_mutex);
+	err = i915_gem_evict_for_node(vm, &evict, 0);
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	return err;
+}
+
+static int __igt_reset_evict_vma(struct drm_i915_private *i915,
+				 struct i915_address_space *vm)
+{
+	struct drm_i915_gem_object *obj;
+	struct task_struct *tsk = NULL;
+	struct i915_request *rq;
+	struct evict_vma arg;
+	struct hang h;
+	int err;
+
+	if (!intel_engine_can_store_dword(i915->engine[RCS]))
+		return 0;
+
+	/* Check that we can recover an unbind stuck on a hanging request */
+
+	global_reset_lock(i915);
+
+	mutex_lock(&i915->drm.struct_mutex);
+	err = hang_init(&h, i915);
+	if (err)
+		goto unlock;
+
+	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto fini;
+	}
+
+	arg.vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(arg.vma)) {
+		err = PTR_ERR(arg.vma);
+		goto out_obj;
+	}
+
+	rq = hang_create_request(&h, i915->engine[RCS]);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_obj;
+	}
+
+	err = i915_vma_pin(arg.vma, 0, 0,
+			   i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER);
+	if (err)
+		goto out_obj;
+
+	err = i915_vma_move_to_active(arg.vma, rq, EXEC_OBJECT_WRITE);
+	i915_vma_unpin(arg.vma);
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (err)
+		goto out_rq;
+
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	if (!wait_until_running(&h, rq)) {
+		struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+		pr_err("%s: Failed to start request %x, at %x\n",
+		       __func__, rq->fence.seqno, hws_seqno(&h, rq));
+		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+		i915_gem_set_wedged(i915);
+		goto out_reset;
+	}
+
+	init_completion(&arg.completion);
+
+	tsk = kthread_run(evict_vma, &arg, "igt/evict_vma");
+	if (IS_ERR(tsk)) {
+		err = PTR_ERR(tsk);
+		tsk = NULL;
+		goto out_reset;
+	}
+
+	wait_for_completion(&arg.completion);
+
+	if (wait_for(waitqueue_active(&rq->execute), 10)) {
+		struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+		pr_err("igt/evict_vma kthread did not wait\n");
+		intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
+
+		i915_gem_set_wedged(i915);
+		goto out_reset;
+	}
+
+out_reset:
+	fake_hangcheck(rq, intel_engine_flag(rq->engine));
+
+	if (tsk) {
+		struct igt_wedge_me w;
+
+		/* The reset, even indirectly, should take less than 10ms. */
+		igt_wedge_on_timeout(&w, i915, HZ / 10 /* 100ms timeout */)
+			err = kthread_stop(tsk);
+	}
+
+	mutex_lock(&i915->drm.struct_mutex);
+out_rq:
+	i915_request_put(rq);
+out_obj:
+	i915_gem_object_put(obj);
+fini:
+	hang_fini(&h);
+unlock:
+	mutex_unlock(&i915->drm.struct_mutex);
+	global_reset_unlock(i915);
+
+	if (i915_terminally_wedged(&i915->gpu_error))
+		return -EIO;
+
+	return err;
+}
+
+static int igt_reset_evict_ggtt(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+
+	return __igt_reset_evict_vma(i915, &i915->ggtt.vm);
+}
+
+static int igt_reset_evict_ppgtt(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gem_context *ctx;
+	int err;
+
+	mutex_lock(&i915->drm.struct_mutex);
+	ctx = kernel_context(i915);
+	mutex_unlock(&i915->drm.struct_mutex);
+	if (IS_ERR(ctx))
+		return PTR_ERR(ctx);
+
+	err = 0;
+	if (ctx->ppgtt) /* aliasing == global gtt locking, covered above */
+		err = __igt_reset_evict_vma(i915, &ctx->ppgtt->vm);
+
+	kernel_context_close(ctx);
+	return err;
+}
+
 static int wait_for_others(struct drm_i915_private *i915,
 			   struct intel_engine_cs *exclude)
 {

@@ -1240,8 +1405,10 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_reset_idle_engine),
 		SUBTEST(igt_reset_active_engine),
 		SUBTEST(igt_reset_engines),
-		SUBTEST(igt_wait_reset),
 		SUBTEST(igt_reset_queue),
+		SUBTEST(igt_reset_wait),
+		SUBTEST(igt_reset_evict_ggtt),
+		SUBTEST(igt_reset_evict_ppgtt),
 		SUBTEST(igt_handle_error),
 	};
 	bool saved_hangcheck;
@@ -451,12 +451,127 @@ static int live_late_preempt(void *arg)
 	goto err_ctx_lo;
 }
 
+static int live_preempt_hang(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gem_context *ctx_hi, *ctx_lo;
+	struct spinner spin_hi, spin_lo;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+		return 0;
+
+	if (!intel_has_reset_engine(i915))
+		return 0;
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	if (spinner_init(&spin_hi, i915))
+		goto err_unlock;
+
+	if (spinner_init(&spin_lo, i915))
+		goto err_spin_hi;
+
+	ctx_hi = kernel_context(i915);
+	if (!ctx_hi)
+		goto err_spin_lo;
+	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+
+	ctx_lo = kernel_context(i915);
+	if (!ctx_lo)
+		goto err_ctx_hi;
+	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+
+	for_each_engine(engine, i915, id) {
+		struct i915_request *rq;
+
+		if (!intel_engine_has_preemption(engine))
+			continue;
+
+		rq = spinner_create_request(&spin_lo, ctx_lo, engine,
+					    MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (!wait_for_spinner(&spin_lo, rq)) {
+			GEM_TRACE("lo spinner failed to start\n");
+			GEM_TRACE_DUMP();
+			i915_gem_set_wedged(i915);
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
+		rq = spinner_create_request(&spin_hi, ctx_hi, engine,
+					    MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			spinner_end(&spin_lo);
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		init_completion(&engine->execlists.preempt_hang.completion);
+		engine->execlists.preempt_hang.inject_hang = true;
+
+		i915_request_add(rq);
+
+		if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
+						 HZ / 10)) {
+			pr_err("Preemption did not occur within timeout!");
+			GEM_TRACE_DUMP();
+			i915_gem_set_wedged(i915);
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
+		set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+		i915_reset_engine(engine, NULL);
+		clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+
+		engine->execlists.preempt_hang.inject_hang = false;
+
+		if (!wait_for_spinner(&spin_hi, rq)) {
+			GEM_TRACE("hi spinner failed to start\n");
+			GEM_TRACE_DUMP();
+			i915_gem_set_wedged(i915);
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+
+		spinner_end(&spin_hi);
+		spinner_end(&spin_lo);
+		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+	}
+
+	err = 0;
+err_ctx_lo:
+	kernel_context_close(ctx_lo);
+err_ctx_hi:
+	kernel_context_close(ctx_hi);
+err_spin_lo:
+	spinner_fini(&spin_lo);
+err_spin_hi:
+	spinner_fini(&spin_hi);
+err_unlock:
+	igt_flush_test(i915, I915_WAIT_LOCKED);
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+}
+
 int intel_execlists_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_sanitycheck),
 		SUBTEST(live_preempt),
 		SUBTEST(live_late_preempt),
+		SUBTEST(live_preempt_hang),
 	};
 
 	if (!HAS_EXECLISTS(i915))
@@ -6,6 +6,7 @@
 
 #include "../i915_selftest.h"
 
+#include "igt_wedge_me.h"
 #include "mock_context.h"
 
 static struct drm_i915_gem_object *

@@ -111,6 +112,7 @@ static int check_whitelist(const struct whitelist *w,
 			   struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_object *results;
+	struct igt_wedge_me wedge;
 	u32 *vaddr;
 	int err;
 	int i;

@@ -119,7 +121,11 @@ static int check_whitelist(const struct whitelist *w,
 	if (IS_ERR(results))
 		return PTR_ERR(results);
 
-	err = i915_gem_object_set_to_cpu_domain(results, false);
+	err = 0;
+	igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
+		err = i915_gem_object_set_to_cpu_domain(results, false);
+	if (i915_terminally_wedged(&ctx->i915->gpu_error))
+		err = -EIO;
 	if (err)
 		goto out_put;
 
@@ -41,6 +41,7 @@ struct drm_vma_offset_node {
 	rwlock_t vm_lock;
 	struct drm_mm_node vm_node;
 	struct rb_root vm_files;
+	bool readonly:1;
 };
 
 struct drm_vma_offset_manager {
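The new readonly:1 flag gives drivers a place to record that the object behind a mmap offset must never be mapped writable. A sketch of the kind of check a driver's mmap path is assumed to make with it (this exact helper is illustrative, not part of the patch):

	static int check_vma_flags(const struct drm_vma_offset_node *node,
				   const struct vm_area_struct *vma)
	{
		/* a writable CPU mapping of a read-only object is a bug */
		if (node->readonly && (vma->vm_flags & VM_WRITE))
			return -EINVAL;

		return 0;
	}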
@@ -95,7 +95,9 @@ extern struct resource intel_graphics_stolen_res;
 #define I845_TSEG_SIZE_512K	(2 << 1)
 #define I845_TSEG_SIZE_1M	(3 << 1)
 
-#define INTEL_BSM 0x5c
+#define INTEL_BSM		0x5c
+#define INTEL_GEN11_BSM_DW0	0xc0
+#define INTEL_GEN11_BSM_DW1	0xc4
 #define INTEL_BSM_MASK		(-(1u << 20))
 
 #endif /* _I915_DRM_H_ */