mm/hmm: HMM should have a callback before MM is destroyed
hmm_mirror_register() registers a callback for when the CPU pagetable is modified. Normally, the device driver will call hmm_mirror_unregister() when the process using the device is finished. However, if the process exits uncleanly, the mm_struct can be destroyed with no warning to the device driver.

Link: http://lkml.kernel.org/r/20180323005527.758-4-jglisse@redhat.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Reviewed-by: John Hubbard <jhubbard@nvidia.com>
Cc: Evgeny Baskakov <ebaskakov@nvidia.com>
Cc: Mark Hairgrove <mhairgrove@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit e1401513c6
parent b28b08de43
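For context, a device driver consuming this API would implement the new release() callback next to its existing sync_cpu_device_pagetables() callback, so it can stop mirroring when the mm_struct dies before hmm_mirror_unregister() is called. The sketch below is illustrative only and is not part of this patch: the my_device / my_driver_* names are hypothetical, and the sync_cpu_device_pagetables() signature is assumed to match the HMM API as of this change.

#include <linux/kernel.h>
#include <linux/hmm.h>
#include <linux/mm_types.h>

/* Hypothetical driver-private state embedding an HMM mirror. */
struct my_device {
	struct hmm_mirror mirror;
	bool mm_is_dead;
};

/* New callback: invoked by HMM while the mm_struct is being released. */
static void my_driver_release(struct hmm_mirror *mirror)
{
	struct my_device *mydev = container_of(mirror, struct my_device, mirror);

	/*
	 * The address space is going away.  Stop all device access to it and
	 * drop any remaining references to the mirror before returning.
	 */
	mydev->mm_is_dead = true;
}

/* Existing callback: invalidate device mappings for [start, end). */
static void my_driver_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
						 enum hmm_update_type update,
						 unsigned long start,
						 unsigned long end)
{
	/* Tear down the device page-table entries covering the range. */
}

static const struct hmm_mirror_ops my_driver_mirror_ops = {
	.release			= my_driver_release,
	.sync_cpu_device_pagetables	= my_driver_sync_cpu_device_pagetables,
};

/* Bind the device to a process address space. */
static int my_driver_bind(struct my_device *mydev, struct mm_struct *mm)
{
	mydev->mirror.ops = &my_driver_mirror_ops;
	return hmm_mirror_register(&mydev->mirror, mm);
}

/* Clean teardown still goes through hmm_mirror_unregister(); release()
 * only covers the case where the process exits uncleanly first. */
static void my_driver_unbind(struct my_device *mydev)
{
	hmm_mirror_unregister(&mydev->mirror);
}

As the hmm_release() hunk below shows, HMM drops mirrors_sem before invoking the driver's release() callback, so the callback may wait on pending work that itself triggers mmu_notifier callbacks without deadlocking.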
@@ -218,6 +218,16 @@ enum hmm_update_type {
  * @update: callback to update range on a device
  */
 struct hmm_mirror_ops {
+	/* release() - release hmm_mirror
+	 *
+	 * @mirror: pointer to struct hmm_mirror
+	 *
+	 * This is called when the mm_struct is being released.
+	 * The callback should make sure no references to the mirror occur
+	 * after the callback returns.
+	 */
+	void (*release)(struct hmm_mirror *mirror);
+
 	/* sync_cpu_device_pagetables() - synchronize page tables
 	 *
 	 * @mirror: pointer to struct hmm_mirror
mm/hmm.c | 29
@@ -160,6 +160,32 @@ static void hmm_invalidate_range(struct hmm *hmm,
 	up_read(&hmm->mirrors_sem);
 }
 
+static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+	struct hmm_mirror *mirror;
+	struct hmm *hmm = mm->hmm;
+
+	down_write(&hmm->mirrors_sem);
+	mirror = list_first_entry_or_null(&hmm->mirrors, struct hmm_mirror,
+					  list);
+	while (mirror) {
+		list_del_init(&mirror->list);
+		if (mirror->ops->release) {
+			/*
+			 * Drop mirrors_sem so callback can wait on any pending
+			 * work that might itself trigger mmu_notifier callback
+			 * and thus would deadlock with us.
+			 */
+			up_write(&hmm->mirrors_sem);
+			mirror->ops->release(mirror);
+			down_write(&hmm->mirrors_sem);
+		}
+		mirror = list_first_entry_or_null(&hmm->mirrors,
+						  struct hmm_mirror, list);
+	}
+	up_write(&hmm->mirrors_sem);
+}
+
 static void hmm_invalidate_range_start(struct mmu_notifier *mn,
 				       struct mm_struct *mm,
 				       unsigned long start,
@@ -185,6 +211,7 @@ static void hmm_invalidate_range_end(struct mmu_notifier *mn,
 }
 
 static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
+	.release		= hmm_release,
 	.invalidate_range_start	= hmm_invalidate_range_start,
 	.invalidate_range_end	= hmm_invalidate_range_end,
 };
@@ -230,7 +257,7 @@ void hmm_mirror_unregister(struct hmm_mirror *mirror)
 	struct hmm *hmm = mirror->hmm;
 
 	down_write(&hmm->mirrors_sem);
-	list_del(&mirror->list);
+	list_del_init(&mirror->list);
 	up_write(&hmm->mirrors_sem);
 }
 EXPORT_SYMBOL(hmm_mirror_unregister);