mirror of https://gitee.com/openkylin/linux.git
drm/i915/gen8: Add 4 level support in insert_entries and clear_range
When 48b is enabled, gen8_ppgtt_insert_entries needs to read the
Page Map Level 4 (PML4), before it selects which Page Directory
Pointer (PDP) it will write to.

Similarly, gen8_ppgtt_clear_range needs to get the correct PDP/PD
range.

This patch was inspired by Ben's "Depend exclusively on map and
unmap_vma".

v2: Rebase after s/page_tables/page_table/.

v3: Remove unnecessary pdpe loop in gen8_ppgtt_clear_range_4lvl and use
clamp_pdp in gen8_ppgtt_insert_entries (Akash).

v4: Merge gen8_ppgtt_clear_range_4lvl into gen8_ppgtt_clear_range to
maintain symmetry with gen8_ppgtt_insert_entries (Akash).

v5: Do not mix pages and bytes in insert_entries (Akash).

v6: Prevent overflow in sg_nents << PAGE_SHIFT, when inserting 4GB at
once.

v7: Rebase after Mika's ppgtt cleanup / scratch merge patch series.
Use gen8_px_index functions, and remove unnecessary number of pages
parameter in insert_pte_entries.

v8: Change gen8_ppgtt_clear_pte_range to stop at PDP boundary, instead
of adding an extra clamp function; remove unnecessary pdp_start/pdp_len
variables (Akash).

v9: pages->orig_nents instead of sg_nents(pages->sgl) to get the
length (Akash).

v10: Remove pdp warning check in gen8_ppgtt_insert_pte_entries until
this commit (Akash).

Reviewed-by: Akash Goel <akash.goel@intel.com> (v9)
Cc: Akash Goel <akash.goel@intel.com>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
commit de5ba8eb9c
parent 3387d433b0
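As background, the helpers this patch switches to (gen8_pdpe_index() and friends) just pull 9-bit index fields out of the GPU virtual address. The standalone sketch below mirrors that decomposition; it is an illustration, not driver code, and its constants are local stand-ins assuming the gen8 4-level layout of 9 index bits per level on top of 4 KB pages.

/*
 * Standalone illustration (not driver code): how a 48-bit GPU virtual
 * address splits into the four per-level indices that helpers like
 * gen8_pdpe_index() return.  Assumes 9 index bits per level over
 * 4 KB pages; the macro names here are local stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define LVL_BITS	9			/* 512 entries per level */
#define LVL_MASK	((1u << LVL_BITS) - 1)
#define PTE_SHIFT	12			/* 4 KB pages */
#define PDE_SHIFT	(PTE_SHIFT + LVL_BITS)	/* 21 */
#define PDPE_SHIFT	(PDE_SHIFT + LVL_BITS)	/* 30 */
#define PML4E_SHIFT	(PDPE_SHIFT + LVL_BITS)	/* 39 */

static unsigned pml4e_index(uint64_t addr) { return (addr >> PML4E_SHIFT) & LVL_MASK; }
static unsigned pdpe_index(uint64_t addr)  { return (addr >> PDPE_SHIFT) & LVL_MASK; }
static unsigned pde_index(uint64_t addr)   { return (addr >> PDE_SHIFT) & LVL_MASK; }
static unsigned pte_index(uint64_t addr)   { return (addr >> PTE_SHIFT) & LVL_MASK; }

int main(void)
{
	uint64_t addr = 0x0000804020013000ull;	/* arbitrary 48-bit address */

	/* Walk order: PML4 entry -> PDP -> PD -> PT -> 4 KB page. */
	printf("pml4e=%u pdpe=%u pde=%u pte=%u\n",
	       pml4e_index(addr), pdpe_index(addr),
	       pde_index(addr), pte_index(addr));
	return 0;
}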
@@ -681,9 +681,9 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
 	gen8_pte_t *pt_vaddr;
-	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
-	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
-	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+	unsigned pdpe = gen8_pdpe_index(start);
+	unsigned pde = gen8_pde_index(start);
+	unsigned pte = gen8_pte_index(start);
 	unsigned num_entries = length >> PAGE_SHIFT;
 	unsigned last_pte, i;
 
@@ -722,7 +722,8 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
 
 		pte = 0;
 		if (++pde == I915_PDES) {
-			pdpe++;
+			if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+				break;
 			pde = 0;
 		}
 	}
@@ -735,12 +736,21 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
-	struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */
-
 	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
 						 I915_CACHE_LLC, use_scratch);
 
-	gen8_ppgtt_clear_pte_range(vm, pdp, start, length, scratch_pte);
+	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+		gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
+					   scratch_pte);
+	} else {
+		uint64_t templ4, pml4e;
+		struct i915_page_directory_pointer *pdp;
+
+		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+			gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
+						   scratch_pte);
+		}
+	}
 }
 
 static void
@@ -753,16 +763,13 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
 	gen8_pte_t *pt_vaddr;
-	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
-	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
-	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
+	unsigned pdpe = gen8_pdpe_index(start);
+	unsigned pde = gen8_pde_index(start);
+	unsigned pte = gen8_pte_index(start);
 
 	pt_vaddr = NULL;
 
 	while (__sg_page_iter_next(sg_iter)) {
-		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
-			break;
-
 		if (pt_vaddr == NULL) {
 			struct i915_page_directory *pd = pdp->page_directory[pdpe];
 			struct i915_page_table *pt = pd->page_table[pde];
@@ -776,7 +783,8 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
 			kunmap_px(ppgtt, pt_vaddr);
 			pt_vaddr = NULL;
 			if (++pde == I915_PDES) {
-				pdpe++;
+				if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
+					break;
 				pde = 0;
 			}
 			pte = 0;
@@ -795,11 +803,23 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 {
 	struct i915_hw_ppgtt *ppgtt =
 		container_of(vm, struct i915_hw_ppgtt, base);
-	struct i915_page_directory_pointer *pdp = &ppgtt->pdp; /* FIXME: 48b */
 	struct sg_page_iter sg_iter;
 
 	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
-	gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter, start, cache_level);
+
+	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
+		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
+					      cache_level);
+	} else {
+		struct i915_page_directory_pointer *pdp;
+		uint64_t templ4, pml4e;
+		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
+
+		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, templ4, pml4e) {
+			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
+						      start, cache_level);
+		}
+	}
 }
 
 static void gen8_free_page_tables(struct drm_device *dev,
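The 48-bit paths above lean on gen8_for_each_pml4e() to hand one PDP at a time to the per-PDP helpers. The standalone sketch below illustrates that range handling conceptually: a [start, start + length) range is chopped at PML4-entry boundaries (512 GB apiece under the 9/9/9/9 + 12 layout). The helper names and the placement of the clamping here are simplified assumptions; in the patch itself the per-PDP helpers stop at the PDP boundary (v8) while the macro advances start/length.

/*
 * Simplified, standalone sketch of range splitting at PML4-entry
 * boundaries.  for_each_pml4e() and clear_pte_range() are stand-ins,
 * not the driver's gen8_for_each_pml4e() macro or
 * gen8_ppgtt_clear_pte_range(); the semantics are assumed from the
 * usage in the diff above.
 */
#include <stdint.h>
#include <stdio.h>

#define PML4E_SHIFT	39
#define PML4E_SIZE	(1ull << PML4E_SHIFT)	/* 512 GB per PML4 entry */

static void clear_pte_range(unsigned pml4e, uint64_t start, uint64_t length)
{
	/* Stand-in for the per-PDP work done by the real helper. */
	printf("pml4e %u: clear 0x%llx + 0x%llx\n", pml4e,
	       (unsigned long long)start, (unsigned long long)length);
}

static void for_each_pml4e(uint64_t start, uint64_t length)
{
	while (length) {
		unsigned pml4e = (unsigned)(start >> PML4E_SHIFT);
		/* Bytes left before the next PML4 entry begins. */
		uint64_t chunk = PML4E_SIZE - (start & (PML4E_SIZE - 1));

		if (chunk > length)
			chunk = length;

		clear_pte_range(pml4e, start, chunk);

		start += chunk;
		length -= chunk;
	}
}

int main(void)
{
	/* A range straddling one 512 GB boundary is split in two. */
	for_each_pml4e(PML4E_SIZE - 0x2000, 0x4000);
	return 0;
}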