mm/hmm: remove the page_shift member from struct hmm_range

All users pass PAGE_SIZE here. If we ever want to support single entries
for huge pages, the right approach is to add an HMM_FAULT_HUGEPAGE flag
that uses the huge page size, rather than having every caller compute that
size just so the hmm code can verify it.

Link: https://lore.kernel.org/r/20190806160554.14046-8-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 7f08263d9b
parent fac555ac93
Author:    Christoph Hellwig <hch@lst.de>
Date:      2019-08-06 19:05:45 +03:00
Committed: Jason Gunthorpe <jgg@mellanox.com>

4 changed files with 9 additions and 57 deletions
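As a quick illustration of the resulting API (a minimal sketch, not part of
this commit; "pfns", "npages" and "start" are hypothetical caller variables),
a driver now only describes the virtual address range and the pfns array,
and the page size is implicitly PAGE_SIZE:

        struct hmm_range range = {};

        range.pfns = pfns;                       /* one entry per PAGE_SIZE page */
        range.start = start;                     /* must be PAGE_SIZE aligned */
        range.end = start + npages * PAGE_SIZE;  /* must be PAGE_SIZE aligned */
        /* no range.page_shift to set up any more */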

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -818,7 +818,6 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 		0 : range->flags[HMM_PFN_WRITE];
 	range->pfn_flags_mask = 0;
 	range->pfns = pfns;
-	range->page_shift = PAGE_SHIFT;
 	range->start = start;
 	range->end = start + ttm->num_pages * PAGE_SIZE;

diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -680,7 +680,6 @@ nouveau_svm_fault(struct nvif_notify *notify)
 			 args.i.p.addr + args.i.p.size, fn - fi);
 
 		/* Have HMM fault pages within the fault window to the GPU. */
-		range.page_shift = PAGE_SHIFT;
 		range.start = args.i.p.addr;
 		range.end = args.i.p.addr + args.i.p.size;
 		range.pfns = args.phys;

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -158,7 +158,6 @@ enum hmm_pfn_value_e {
  * @values: pfn value for some special case (none, special, error, ...)
  * @default_flags: default flags for the range (write, read, ... see hmm doc)
  * @pfn_flags_mask: allows to mask pfn flags so that only default_flags matter
- * @page_shift: device virtual address shift value (should be >= PAGE_SHIFT)
  * @pfn_shifts: pfn shift value (should be <= PAGE_SHIFT)
  * @valid: pfns array did not change since it has been fill by an HMM function
  */
@@ -172,31 +171,10 @@ struct hmm_range {
 	const uint64_t		*values;
 	uint64_t		default_flags;
 	uint64_t		pfn_flags_mask;
-	uint8_t			page_shift;
 	uint8_t			pfn_shift;
 	bool			valid;
 };
 
-/*
- * hmm_range_page_shift() - return the page shift for the range
- * @range: range being queried
- * Return: page shift (page size = 1 << page shift) for the range
- */
-static inline unsigned hmm_range_page_shift(const struct hmm_range *range)
-{
-	return range->page_shift;
-}
-
-/*
- * hmm_range_page_size() - return the page size for the range
- * @range: range being queried
- * Return: page size for the range in bytes
- */
-static inline unsigned long hmm_range_page_size(const struct hmm_range *range)
-{
-	return 1UL << hmm_range_page_shift(range);
-}
-
 /*
  * hmm_range_wait_until_valid() - wait for range to be valid
  * @range: range affected by invalidation to wait on

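Since hmm_range_page_shift() always returned PAGE_SHIFT and
hmm_range_page_size() always returned PAGE_SIZE for the ranges callers
actually build, their call sites collapse to the plain constants. A sketch of
the pattern the mm/hmm.c hunks below apply:

        /* before */
        i = (addr - range->start) >> range->page_shift;
        addr += hmm_range_page_size(range);

        /* after */
        i = (addr - range->start) >> PAGE_SHIFT;
        addr += PAGE_SIZE;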
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -345,13 +345,12 @@ static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
 	uint64_t *pfns = range->pfns;
-	unsigned long i, page_size;
+	unsigned long i;
 
 	hmm_vma_walk->last = addr;
-	page_size = hmm_range_page_size(range);
-	i = (addr - range->start) >> range->page_shift;
+	i = (addr - range->start) >> PAGE_SHIFT;
 
-	for (; addr < end; addr += page_size, i++) {
+	for (; addr < end; addr += PAGE_SIZE, i++) {
 		pfns[i] = range->values[HMM_PFN_NONE];
 		if (fault || write_fault) {
 			int ret;
@@ -779,7 +778,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 			      struct mm_walk *walk)
 {
 #ifdef CONFIG_HUGETLB_PAGE
-	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
+	unsigned long addr = start, i, pfn, mask;
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
 	struct vm_area_struct *vma = walk->vma;
@@ -790,24 +789,12 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	pte_t entry;
 	int ret = 0;
 
-	size = huge_page_size(h);
-	mask = size - 1;
-	if (range->page_shift != PAGE_SHIFT) {
-		/* Make sure we are looking at a full page. */
-		if (start & mask)
-			return -EINVAL;
-		if (end < (start + size))
-			return -EINVAL;
-		pfn_inc = size >> PAGE_SHIFT;
-	} else {
-		pfn_inc = 1;
-		size = PAGE_SIZE;
-	}
+	mask = huge_page_size(h) - 1;
 
 	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);
 
-	i = (start - range->start) >> range->page_shift;
+	i = (start - range->start) >> PAGE_SHIFT;
 	orig_pfn = range->pfns[i];
 	range->pfns[i] = range->values[HMM_PFN_NONE];
 	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
@@ -819,8 +806,8 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 		goto unlock;
 	}
 
-	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
-	for (; addr < end; addr += size, i++, pfn += pfn_inc)
+	pfn = pte_pfn(entry) + ((start & mask) >> PAGE_SHIFT);
+	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
 		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
 				 cpu_flags;
 	hmm_vma_walk->last = end;
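The simplified huge page arithmetic still walks the mapping in PAGE_SIZE
steps. As a worked example (hypothetical values: x86-64 with PAGE_SHIFT = 12
and a 2 MiB huge page, so huge_page_size(h) = 0x200000):

        mask = 0x200000 - 1;                    /* 0x1fffff */
        /* start = 0x205000: five small pages into the huge page */
        pfn = pte_pfn(entry) + ((0x205000 & 0x1fffff) >> 12);  /* base + 5 */
        /* the loop then emits one pfn per 4 KiB page: base + 5, base + 6, ... */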
@@ -857,14 +844,13 @@ static void hmm_pfns_clear(struct hmm_range *range,
  */
 int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror)
 {
-	unsigned long mask = ((1UL << range->page_shift) - 1UL);
 	struct hmm *hmm = mirror->hmm;
 	unsigned long flags;
 
 	range->valid = false;
 	range->hmm = NULL;
 
-	if ((range->start & mask) || (range->end & mask))
+	if ((range->start & (PAGE_SIZE - 1)) || (range->end & (PAGE_SIZE - 1)))
 		return -EINVAL;
 	if (range->start >= range->end)
 		return -EINVAL;
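The open-coded mask test in hmm_range_register() is the standard
power-of-two alignment check; it could equivalently be written with
IS_ALIGNED() (a sketch, not what the commit does):

        if (!IS_ALIGNED(range->start, PAGE_SIZE) ||
            !IS_ALIGNED(range->end, PAGE_SIZE))
                return -EINVAL;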
@@ -971,16 +957,6 @@ long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 		if (vma == NULL || (vma->vm_flags & device_vma))
 			return -EFAULT;
 
-		if (is_vm_hugetlb_page(vma)) {
-			if (huge_page_shift(hstate_vma(vma)) !=
-			    range->page_shift &&
-			    range->page_shift != PAGE_SHIFT)
-				return -EINVAL;
-		} else {
-			if (range->page_shift != PAGE_SHIFT)
-				return -EINVAL;
-		}
-
 		if (!(vma->vm_flags & VM_READ)) {
 			/*
 			 * If vma do not allow read access, then assume that it