mirror of https://gitee.com/openkylin/linux.git
drm/i915: Setup all page directories for gen8
If the requested size is less than what the full range of pdps can address, we end up setting pdps for only the requested area. The logical context however needs all pdp entries to be valid. Prior to commit 06fda602db
("drm/i915: Create page table allocators") we have been writing pdp entries with dma address of zero instead of valid pdps. This is supposedly bad even if those pdps are not addressed. As commit 06fda602db
("drm/i915: Create page table allocators") introduced more dynamic structure for pdps, we ended up oopsing when we populated the lrc context. Analyzing this oops revealed the fact that we have not been writing valid pdps with bsw, as it is doing the ppgtt init with 2GB limit in some cases. We should do the right thing and set up the non-addressable part pdps/pde/pte to scratch page through the minimal structure by having just a pdp with pde entries pointing to the same page, with pte entries pointing to the scratch page. But instead of going through that trouble, set up all the pdps through individual pd pages and pt entries, even for non-addressable parts, and let the clear range point them to the scratch page. This way we populate the lrc with valid pdps and wait for the dynamic page allocation work to land, which will do the heavy lifting of truncating the page table tree according to usage. The regression of oopsing in init was introduced by commit 06fda602db
("drm/i915: Create page table allocators") v2: Clear the range for the unused part also (Ville) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=89350 Cc: Michel Thierry <michel.thierry@intel.com> Cc: Ben Widawsky <benjamin.widawsky@intel.com> Cc: Ville Syrjälä <ville.syrjala@linux.intel.com> Tested-by: Valtteri Rantala <valtteri.rantala@intel.com> Signed-off-by: Mika Kuoppala <mika.kuoppala@intel.com> Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
This commit is contained in:
parent
c3346ef688
commit
2934368e41
|
@ -716,15 +716,19 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
|
||||||
if (size % (1<<30))
|
if (size % (1<<30))
|
||||||
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
|
DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
|
||||||
|
|
||||||
/* 1. Do all our allocations for page directories and page tables. */
|
/* 1. Do all our allocations for page directories and page tables.
|
||||||
ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
|
* We allocate more than was asked so that we can point the unused parts
|
||||||
|
* to valid entries that point to scratch page. Dynamic page tables
|
||||||
|
* will fix this eventually.
|
||||||
|
*/
|
||||||
|
ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES);
|
||||||
if (ret)
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* 2. Create DMA mappings for the page directories and page tables.
|
* 2. Create DMA mappings for the page directories and page tables.
|
||||||
*/
|
*/
|
||||||
for (i = 0; i < max_pdp; i++) {
|
for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
|
||||||
ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
|
ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
|
||||||
if (ret)
|
if (ret)
|
||||||
goto bail;
|
goto bail;
|
||||||
|
@ -744,7 +748,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
|
||||||
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
|
* plugged in correctly. So we do that now/here. For aliasing PPGTT, we
|
||||||
* will never need to touch the PDEs again.
|
* will never need to touch the PDEs again.
|
||||||
*/
|
*/
|
||||||
for (i = 0; i < max_pdp; i++) {
|
for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
|
||||||
struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
|
struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
|
||||||
gen8_ppgtt_pde_t *pd_vaddr;
|
gen8_ppgtt_pde_t *pd_vaddr;
|
||||||
pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
|
pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
|
||||||
|
@ -764,9 +768,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
|
||||||
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
|
ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
|
||||||
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
|
ppgtt->base.cleanup = gen8_ppgtt_cleanup;
|
||||||
ppgtt->base.start = 0;
|
ppgtt->base.start = 0;
|
||||||
ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
|
|
||||||
|
|
||||||
ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
|
/* This is the area that we advertise as usable for the caller */
|
||||||
|
ppgtt->base.total = max_pdp * GEN8_PDES_PER_PAGE * GEN8_PTES_PER_PAGE * PAGE_SIZE;
|
||||||
|
|
||||||
|
/* Set all ptes to a valid scratch page. Also above requested space */
|
||||||
|
ppgtt->base.clear_range(&ppgtt->base, 0,
|
||||||
|
ppgtt->num_pd_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE,
|
||||||
|
true);
|
||||||
|
|
||||||
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
|
DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
|
||||||
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
|
ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
|
||||||
|
|
Loading…
Reference in New Issue