mm/hmm: do not unconditionally set pfns when returning EBUSY
In hmm_vma_handle_pte() and hmm_vma_walk_hugetlb_entry() if fault happens
then -EBUSY will be returned and the pfns input flags will have been
destroyed.
For hmm_vma_handle_pte() set HMM_PFN_NONE only on the success returns that
don't otherwise store to pfns.
For hmm_vma_walk_hugetlb_entry() all exit paths already set pfns, so
remove the redundant store.
Fixes: 2aee09d8c1 ("mm/hmm: change hmm_vma_fault() to allow write fault on page basis")
Link: https://lore.kernel.org/r/20200327200021.29372-8-jgg@ziepe.ca
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent
f66c9a33ae
commit
846babe85e
 mm/hmm.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
@@ -250,11 +250,11 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 	pte_t pte = *ptep;
 	uint64_t orig_pfn = *pfn;
 
-	*pfn = range->values[HMM_PFN_NONE];
 	if (pte_none(pte)) {
 		required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
 		if (required_fault)
 			goto fault;
+		*pfn = range->values[HMM_PFN_NONE];
 		return 0;
 	}
@@ -275,8 +275,10 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 		}
 
 		required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0);
-		if (!required_fault)
+		if (!required_fault) {
+			*pfn = range->values[HMM_PFN_NONE];
 			return 0;
+		}
 
 		if (!non_swap_entry(entry))
 			goto fault;
@@ -494,7 +496,6 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 
 	i = (start - range->start) >> PAGE_SHIFT;
 	orig_pfn = range->pfns[i];
-	range->pfns[i] = range->values[HMM_PFN_NONE];
 	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
 	required_fault = hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags);
 	if (required_fault) {
Loading…
Reference in New Issue