mm/hmm: Hold on to the mmget for the lifetime of the range
Range functions like hmm_range_snapshot() and hmm_range_fault() call find_vma(), which requires holding the mmget() and the mmap_sem for the mm.

Make this simpler for the callers by holding the mmget() inside the range for the lifetime of the range. Other functions that accept a range should only be called while the range is registered.

This has the side effect of directly preventing hmm_release() from happening while a range is registered. That means a registered range can never observe hmm->dead as true, so remove dead and hmm_mirror_mm_is_alive() entirely.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Philip Yang <Philip.Yang@amd.com>
parent 157816f377
commit 47f245985a
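For reference, a minimal driver-side sketch (not part of the patch) of the calling convention this change establishes: the caller still holds mmap_sem across hmm_range_fault(), but no longer needs its own mmget()/mmput() pair around the range, because hmm_range_register() now pins the mm until hmm_range_unregister(). It assumes the hmm_range_register() argument list of this era, (range, mm, start, end, page_shift); the function name and the pfns handling are invented for illustration.

/*
 * Illustrative only: a driver fault handler under the new rule.
 * hmm_range_register() takes the mm reference that stays held until
 * hmm_range_unregister(); the driver only supplies mmap_sem.
 */
static long example_mirror_fault(struct mm_struct *mm, u64 *pfns,
                                 unsigned long start, unsigned long end)
{
        struct hmm_range range = {
                .pfns = pfns,
                /* .flags / .values / .pfn_shift tables omitted for brevity */
        };
        long ret;

        /* Takes the mm reference held for the lifetime of the range. */
        ret = hmm_range_register(&range, mm, start, end, PAGE_SHIFT);
        if (ret)
                return ret;

        down_read(&mm->mmap_sem);
        ret = hmm_range_fault(&range, true);
        up_read(&mm->mmap_sem);

        /* Drops the reference taken by hmm_range_register(). */
        hmm_range_unregister(&range);
        return ret;
}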
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -82,7 +82,6 @@
  * @mirrors_sem: read/write semaphore protecting the mirrors list
  * @wq: wait queue for user waiting on a range invalidation
  * @notifiers: count of active mmu notifiers
- * @dead: is the mm dead ?
  */
 struct hmm {
         struct mm_struct        *mm;
@@ -95,7 +94,6 @@ struct hmm {
         wait_queue_head_t       wq;
         struct rcu_head         rcu;
         long                    notifiers;
-        bool                    dead;
 };
 
 /*
@@ -459,30 +457,6 @@ struct hmm_mirror {
 int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
 void hmm_mirror_unregister(struct hmm_mirror *mirror);
 
-/*
- * hmm_mirror_mm_is_alive() - test if mm is still alive
- * @mirror: the HMM mm mirror for which we want to lock the mmap_sem
- * Return: false if the mm is dead, true otherwise
- *
- * This is an optimization, it will not always accurately return false if the
- * mm is dead; i.e., there can be false negatives (process is being killed but
- * HMM is not yet informed of that). It is only intended to be used to optimize
- * out cases where the driver is about to do something time consuming and it
- * would be better to skip it if the mm is dead.
- */
-static inline bool hmm_mirror_mm_is_alive(struct hmm_mirror *mirror)
-{
-        struct mm_struct *mm;
-
-        if (!mirror || !mirror->hmm)
-                return false;
-        mm = READ_ONCE(mirror->hmm->mm);
-        if (mirror->hmm->dead || !mm)
-                return false;
-
-        return true;
-}
-
 /*
  * Please see Documentation/vm/hmm.rst for how to use the range API.
  */
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -67,7 +67,6 @@ static struct hmm *hmm_get_or_create(struct mm_struct *mm)
         mutex_init(&hmm->lock);
         kref_init(&hmm->kref);
         hmm->notifiers = 0;
-        hmm->dead = false;
         hmm->mm = mm;
 
         hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
@@ -120,21 +119,16 @@ static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
         struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
         struct hmm_mirror *mirror;
-        struct hmm_range *range;
 
         /* Bail out if hmm is in the process of being freed */
         if (!kref_get_unless_zero(&hmm->kref))
                 return;
 
-        /* Report this HMM as dying. */
-        hmm->dead = true;
-
-        /* Wake-up everyone waiting on any range. */
-        mutex_lock(&hmm->lock);
-        list_for_each_entry(range, &hmm->ranges, list)
-                range->valid = false;
-        wake_up_all(&hmm->wq);
-        mutex_unlock(&hmm->lock);
+        /*
+         * Since hmm_range_register() holds the mmget() lock hmm_release() is
+         * prevented as long as a range exists.
+         */
+        WARN_ON(!list_empty_careful(&hmm->ranges));
 
         down_write(&hmm->mirrors_sem);
         mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
@@ -903,8 +897,8 @@ int hmm_range_register(struct hmm_range *range,
         range->start = start;
         range->end = end;
 
-        /* Check if hmm_mm_destroy() was call. */
-        if (hmm->mm == NULL || hmm->dead)
+        /* Prevent hmm_release() from running while the range is valid */
+        if (!mmget_not_zero(hmm->mm))
                 return -EFAULT;
 
         /* Initialize range to track CPU page table updates. */
@@ -942,11 +936,12 @@ void hmm_range_unregister(struct hmm_range *range)
                 return;
 
         mutex_lock(&hmm->lock);
-        list_del(&range->list);
+        list_del_init(&range->list);
         mutex_unlock(&hmm->lock);
 
         /* Drop reference taken by hmm_range_register() */
         range->valid = false;
+        mmput(hmm->mm);
         hmm_put(hmm);
         range->hmm = NULL;
 }
@@ -974,10 +969,7 @@ long hmm_range_snapshot(struct hmm_range *range)
         struct vm_area_struct *vma;
         struct mm_walk mm_walk;
 
-        /* Check if hmm_mm_destroy() was call. */
-        if (hmm->mm == NULL || hmm->dead)
-                return -EFAULT;
-
+        lockdep_assert_held(&hmm->mm->mmap_sem);
         do {
                 /* If range is no longer valid force retry. */
                 if (!range->valid)
@@ -1072,9 +1064,7 @@ long hmm_range_fault(struct hmm_range *range, bool block)
         struct mm_walk mm_walk;
         int ret;
 
-        /* Check if hmm_mm_destroy() was call. */
-        if (hmm->mm == NULL || hmm->dead)
-                return -EFAULT;
         lockdep_assert_held(&hmm->mm->mmap_sem);
 
         do {
                 /* If range is no longer valid force retry. */
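As a side note on the mmget_not_zero()/mmput() pairing the hmm_range_register() hunk relies on: holding an mm_users reference keeps exit_mmap(), and with it the mmu_notifier release path that ends in hmm_release(), from running. A generic sketch of that pattern follows; the do_work_on_mm() callback is a made-up placeholder, only the mmget_not_zero()/mmput() calls are real kernel API.

#include <linux/sched/mm.h>

/*
 * Illustrative only: pin an mm across some work so the mm cannot reach
 * exit_mmap()/mmu_notifier release concurrently.
 */
static int with_mm_pinned(struct mm_struct *mm,
                          int (*do_work_on_mm)(struct mm_struct *))
{
        int ret;

        if (!mmget_not_zero(mm))
                return -EFAULT;         /* mm has already begun exiting */

        ret = do_work_on_mm(mm);        /* mm_users pinned, release cannot run */

        mmput(mm);                      /* may drop mm_users back to zero */
        return ret;
}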