mm/hmm: a few more C style and comment clean ups
A few more comments and minor programming style clean ups. There should
be no functional changes.

Link: https://lore.kernel.org/r/20190726005650.2566-3-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 1f96180792
commit d2e8d55116
 mm/hmm.c | 39 +++++++++++++++++----------------------
 1 file changed, 17 insertions(+), 22 deletions(-)
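Several of the comment changes in the diff below switch "Returns:" to
"Return:", the section name the kernel-doc documentation prescribes for
describing a function's return value. A minimal sketch of that comment
layout, using a purely hypothetical helper for illustration:

/**
 * example_mm_registered() - test whether an mm has an object attached
 * @mm: mm struct to check (hypothetical helper, for illustration only)
 *
 * Return: true if an object is already registered against @mm, false otherwise.
 */
static bool example_mm_registered(struct mm_struct *mm)
{
	return mm != NULL;	/* illustration only */
}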
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -32,7 +32,7 @@ static const struct mmu_notifier_ops hmm_mmu_notifier_ops;
  * hmm_get_or_create - register HMM against an mm (HMM internal)
  *
  * @mm: mm struct to attach to
- * Returns: returns an HMM object, either by referencing the existing
+ * Return: an HMM object, either by referencing the existing
  * (per-process) object, or by creating a new one.
  *
  * This is not intended to be used directly by device drivers. If mm already
@@ -325,8 +325,8 @@ static int hmm_pfns_bad(unsigned long addr,
 }
 
 /*
- * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
- * @start: range virtual start address (inclusive)
+ * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
+ * @addr: range virtual start address (inclusive)
  * @end: range virtual end address (exclusive)
  * @fault: should we fault or not ?
  * @write_fault: write fault ?
@@ -376,9 +376,9 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
 	/*
 	 * So we not only consider the individual per page request we also
 	 * consider the default flags requested for the range. The API can
-	 * be use in 2 fashions. The first one where the HMM user coalesce
-	 * multiple page fault into one request and set flags per pfns for
-	 * of those faults. The second one where the HMM user want to pre-
+	 * be used 2 ways. The first one where the HMM user coalesces
+	 * multiple page faults into one request and sets flags per pfn for
+	 * those faults. The second one where the HMM user wants to pre-
 	 * fault a range with specific flags. For the latter one it is a
 	 * waste to have the user pre-fill the pfn arrays with a default
 	 * flags value.
@@ -388,7 +388,7 @@ static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
 	/* We aren't ask to do anything ... */
 	if (!(pfns & range->flags[HMM_PFN_VALID]))
 		return;
-	/* If this is device memory than only fault if explicitly requested */
+	/* If this is device memory then only fault if explicitly requested */
 	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
 		/* Do we fault on device memory ? */
 		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
@@ -502,7 +502,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk,
 	hmm_vma_walk->last = end;
 	return 0;
 #else
-	/* If THP is not enabled then we should never reach that code ! */
+	/* If THP is not enabled then we should never reach this code ! */
 	return -EINVAL;
 #endif
 }
@@ -522,7 +522,6 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	struct vm_area_struct *vma = walk->vma;
 	bool fault, write_fault;
 	uint64_t cpu_flags;
 	pte_t pte = *ptep;
@@ -571,8 +570,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 			if (fault || write_fault) {
 				pte_unmap(ptep);
 				hmm_vma_walk->last = addr;
-				migration_entry_wait(vma->vm_mm,
-						     pmdp, addr);
+				migration_entry_wait(walk->mm, pmdp, addr);
 				return -EBUSY;
 			}
 			return 0;
@@ -620,13 +618,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 {
 	struct hmm_vma_walk *hmm_vma_walk = walk->private;
 	struct hmm_range *range = hmm_vma_walk->range;
-	struct vm_area_struct *vma = walk->vma;
 	uint64_t *pfns = range->pfns;
 	unsigned long addr = start, i;
 	pte_t *ptep;
 	pmd_t pmd;
 
-
 again:
 	pmd = READ_ONCE(*pmdp);
 	if (pmd_none(pmd))
@@ -648,7 +644,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 				     0, &fault, &write_fault);
 		if (fault || write_fault) {
 			hmm_vma_walk->last = addr;
-			pmd_migration_entry_wait(vma->vm_mm, pmdp);
+			pmd_migration_entry_wait(walk->mm, pmdp);
 			return -EBUSY;
 		}
 		return 0;
@@ -657,11 +653,11 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 
 	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
 		/*
-		 * No need to take pmd_lock here, even if some other threads
+		 * No need to take pmd_lock here, even if some other thread
 		 * is splitting the huge pmd we will get that event through
 		 * mmu_notifier callback.
 		 *
-		 * So just read pmd value and check again its a transparent
+		 * So just read pmd value and check again it's a transparent
 		 * huge or device mapping one and compute corresponding pfn
 		 * values.
 		 */
@@ -675,7 +671,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	}
 
 	/*
-	 * We have handled all the valid case above ie either none, migration,
+	 * We have handled all the valid cases above ie either none, migration,
 	 * huge or transparent huge. At this point either it is a valid pmd
 	 * entry pointing to pte directory or it is a bad pmd that will not
 	 * recover.
@@ -795,10 +791,10 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 	pte_t entry;
 	int ret = 0;
 
-	size = 1UL << huge_page_shift(h);
+	size = huge_page_size(h);
 	mask = size - 1;
 	if (range->page_shift != PAGE_SHIFT) {
-		/* Make sure we are looking at full page. */
+		/* Make sure we are looking at a full page. */
 		if (start & mask)
 			return -EINVAL;
 		if (end < (start + size))
@@ -809,8 +805,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 		size = PAGE_SIZE;
 	}
 
-
-	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
+	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
 	entry = huge_ptep_get(pte);
 
 	i = (start - range->start) >> range->page_shift;
@@ -859,7 +854,7 @@ static void hmm_pfns_clear(struct hmm_range *range,
  * @start: start virtual address (inclusive)
  * @end: end virtual address (exclusive)
  * @page_shift: expect page shift for the range
- * Returns 0 on success, -EFAULT if the address space is no longer valid
+ * Return: 0 on success, -EFAULT if the address space is no longer valid
  *
  * Track updates to the CPU page table see include/linux/hmm.h
  */
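One hunk above replaces the open-coded "1UL << huge_page_shift(h)" with
huge_page_size(h). Assuming the usual include/linux/hugetlb.h definitions of
these accessors at the time of this series, the two expressions compute the
same value, consistent with the "no functional changes" claim. A minimal
sketch of the reasoning, using simplified stand-in definitions rather than
the real ones:

/* Simplified stand-ins for the hugetlb accessors; the real definitions
 * live in include/linux/hugetlb.h and operate on struct hstate. */
static inline unsigned int huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

/*
 * Because PAGE_SIZE == 1UL << PAGE_SHIFT:
 *
 *   1UL << huge_page_shift(h) == 1UL << (h->order + PAGE_SHIFT)
 *                             == (1UL << PAGE_SHIFT) << h->order
 *                             == huge_page_size(h)
 */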