xen/gntdev: use mmu_interval_notifier_insert
gntdev simply wants to monitor a specific VMA for any notifier events; this
can be done straightforwardly using mmu_interval_notifier_insert() over the
VMA's VA range. The notifier should be attached until the original VMA is
destroyed.

It is unclear if any of this is even sane, but at least a lot of duplicate
code is removed.

Link: https://lore.kernel.org/r/20191112202231.3856-15-jgg@ziepe.ca
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit d3eeb1d77c
parent a22dd50640
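For context, the consumer pattern behind mmu_interval_notifier_insert() looks
roughly like the sketch below: embed a struct mmu_interval_notifier in the
object tracking the VA range, register it with an ops table, and tear down the
range's mappings from the invalidate callback. This is an illustrative sketch
of the API this series introduces, not gntdev code; struct my_range,
my_range_register() and the commented-out my_unmap() are hypothetical names.

#include <linux/mmu_notifier.h>
#include <linux/spinlock.h>

/* Hypothetical per-range object; gntdev embeds the notifier the same way. */
struct my_range {
	struct mmu_interval_notifier notifier;
	spinlock_t lock;
	/* ... driver state for the mapped range ... */
};

static bool my_invalidate(struct mmu_interval_notifier *mn,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_range *r = container_of(mn, struct my_range, notifier);

	/* A non-blockable invalidation must not sleep; ask for a retry. */
	if (!mmu_notifier_range_blockable(range))
		return false;

	spin_lock(&r->lock);
	mmu_interval_set_seq(mn, cur_seq);	/* publish the new sequence */
	/* my_unmap(r, range->start, range->end);  hypothetical teardown */
	spin_unlock(&r->lock);
	return true;
}

static const struct mmu_interval_notifier_ops my_ops = {
	.invalidate = my_invalidate,
};

/* Attach the notifier over [start, start + length) of @mm. */
static int my_range_register(struct my_range *r, struct mm_struct *mm,
			     unsigned long start, unsigned long length)
{
	spin_lock_init(&r->lock);
	return mmu_interval_notifier_insert(&r->notifier, mm, start, length,
					    &my_ops);
}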
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -21,15 +21,8 @@ struct gntdev_dmabuf_priv;
 struct gntdev_priv {
 	/* Maps with visible offsets in the file descriptor. */
 	struct list_head maps;
-	/*
-	 * Maps that are not visible; will be freed on munmap.
-	 * Only populated if populate_freeable_maps == 1
-	 */
-	struct list_head freeable_maps;
 	/* lock protects maps and freeable_maps. */
 	struct mutex lock;
-	struct mm_struct *mm;
-	struct mmu_notifier mn;
 
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 	/* Device for which DMA memory is allocated. */
@@ -49,6 +42,7 @@ struct gntdev_unmap_notify {
 };
 
 struct gntdev_grant_map {
+	struct mmu_interval_notifier notifier;
 	struct list_head next;
 	struct vm_area_struct *vma;
 	int index;
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -63,7 +63,6 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
 static atomic_t pages_mapped = ATOMIC_INIT(0);
 
 static int use_ptemod;
-#define populate_freeable_maps use_ptemod
 
 static int unmap_grant_pages(struct gntdev_grant_map *map,
 			     int offset, int pages);
@@ -249,12 +248,6 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
 		evtchn_put(map->notify.event);
 	}
 
-	if (populate_freeable_maps && priv) {
-		mutex_lock(&priv->lock);
-		list_del(&map->next);
-		mutex_unlock(&priv->lock);
-	}
-
 	if (map->pages && !use_ptemod)
 		unmap_grant_pages(map, 0, map->count);
 	gntdev_free_map(map);
@@ -444,16 +437,9 @@ static void gntdev_vma_close(struct vm_area_struct *vma)
 
 	pr_debug("gntdev_vma_close %p\n", vma);
 	if (use_ptemod) {
-		/* It is possible that an mmu notifier could be running
-		 * concurrently, so take priv->lock to ensure that the vma won't
-		 * vanishing during the unmap_grant_pages call, since we will
-		 * spin here until that completes. Such a concurrent call will
-		 * not do any unmapping, since that has been done prior to
-		 * closing the vma, but it may still iterate the unmap_ops list.
-		 */
-		mutex_lock(&priv->lock);
+		WARN_ON(map->vma != vma);
+		mmu_interval_notifier_remove(&map->notifier);
 		map->vma = NULL;
-		mutex_unlock(&priv->lock);
 	}
 	vma->vm_private_data = NULL;
 	gntdev_put_map(priv, map);
@@ -475,109 +461,44 @@ static const struct vm_operations_struct gntdev_vmops = {
 
 /* ------------------------------------------------------------------ */
 
-static bool in_range(struct gntdev_grant_map *map,
-	      unsigned long start, unsigned long end)
-{
-	if (!map->vma)
-		return false;
-	if (map->vma->vm_start >= end)
-		return false;
-	if (map->vma->vm_end <= start)
-		return false;
-
-	return true;
-}
-
-static int unmap_if_in_range(struct gntdev_grant_map *map,
-			      unsigned long start, unsigned long end,
-			      bool blockable)
+static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
+			      const struct mmu_notifier_range *range,
+			      unsigned long cur_seq)
 {
+	struct gntdev_grant_map *map =
+		container_of(mn, struct gntdev_grant_map, notifier);
 	unsigned long mstart, mend;
 	int err;
 
-	if (!in_range(map, start, end))
-		return 0;
+	if (!mmu_notifier_range_blockable(range))
+		return false;
 
-	if (!blockable)
-		return -EAGAIN;
+	/*
+	 * If the VMA is split or otherwise changed the notifier is not
+	 * updated, but we don't want to process VA's outside the modified
+	 * VMA. FIXME: It would be much more understandable to just prevent
+	 * modifying the VMA in the first place.
+	 */
+	if (map->vma->vm_start >= range->end ||
+	    map->vma->vm_end <= range->start)
+		return true;
 
-	mstart = max(start, map->vma->vm_start);
-	mend = min(end, map->vma->vm_end);
+	mstart = max(range->start, map->vma->vm_start);
+	mend = min(range->end, map->vma->vm_end);
 	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
 			map->index, map->count,
 			map->vma->vm_start, map->vma->vm_end,
-			start, end, mstart, mend);
+			range->start, range->end, mstart, mend);
 	err = unmap_grant_pages(map,
 				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 				(mend - mstart) >> PAGE_SHIFT);
 	WARN_ON(err);
 
-	return 0;
-}
-
-static int mn_invl_range_start(struct mmu_notifier *mn,
-			       const struct mmu_notifier_range *range)
-{
-	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
-	struct gntdev_grant_map *map;
-	int ret = 0;
-
-	if (mmu_notifier_range_blockable(range))
-		mutex_lock(&priv->lock);
-	else if (!mutex_trylock(&priv->lock))
-		return -EAGAIN;
-
-	list_for_each_entry(map, &priv->maps, next) {
-		ret = unmap_if_in_range(map, range->start, range->end,
-					mmu_notifier_range_blockable(range));
-		if (ret)
-			goto out_unlock;
-	}
-	list_for_each_entry(map, &priv->freeable_maps, next) {
-		ret = unmap_if_in_range(map, range->start, range->end,
-					mmu_notifier_range_blockable(range));
-		if (ret)
-			goto out_unlock;
-	}
-
-out_unlock:
-	mutex_unlock(&priv->lock);
-
-	return ret;
-}
-
-static void mn_release(struct mmu_notifier *mn,
-		       struct mm_struct *mm)
-{
-	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
-	struct gntdev_grant_map *map;
-	int err;
-
-	mutex_lock(&priv->lock);
-	list_for_each_entry(map, &priv->maps, next) {
-		if (!map->vma)
-			continue;
-		pr_debug("map %d+%d (%lx %lx)\n",
-				map->index, map->count,
-				map->vma->vm_start, map->vma->vm_end);
-		err = unmap_grant_pages(map, /* offset */ 0, map->count);
-		WARN_ON(err);
-	}
-	list_for_each_entry(map, &priv->freeable_maps, next) {
-		if (!map->vma)
-			continue;
-		pr_debug("map %d+%d (%lx %lx)\n",
-				map->index, map->count,
-				map->vma->vm_start, map->vma->vm_end);
-		err = unmap_grant_pages(map, /* offset */ 0, map->count);
-		WARN_ON(err);
-	}
-	mutex_unlock(&priv->lock);
-}
-
-static const struct mmu_notifier_ops gntdev_mmu_ops = {
-	.release                = mn_release,
-	.invalidate_range_start = mn_invl_range_start,
+	return true;
+}
+
+static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
+	.invalidate = gntdev_invalidate,
 };
 
 /* ------------------------------------------------------------------ */
@@ -592,7 +513,6 @@ static int gntdev_open(struct inode *inode, struct file *flip)
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&priv->maps);
-	INIT_LIST_HEAD(&priv->freeable_maps);
 	mutex_init(&priv->lock);
 
 #ifdef CONFIG_XEN_GNTDEV_DMABUF
@@ -604,17 +524,6 @@ static int gntdev_open(struct inode *inode, struct file *flip)
 	}
 #endif
 
-	if (use_ptemod) {
-		priv->mm = get_task_mm(current);
-		if (!priv->mm) {
-			kfree(priv);
-			return -ENOMEM;
-		}
-		priv->mn.ops = &gntdev_mmu_ops;
-		ret = mmu_notifier_register(&priv->mn, priv->mm);
-		mmput(priv->mm);
-	}
-
 	if (ret) {
 		kfree(priv);
 		return ret;
@@ -644,16 +553,12 @@ static int gntdev_release(struct inode *inode, struct file *flip)
 		list_del(&map->next);
 		gntdev_put_map(NULL /* already removed */, map);
 	}
-	WARN_ON(!list_empty(&priv->freeable_maps));
 	mutex_unlock(&priv->lock);
 
 #ifdef CONFIG_XEN_GNTDEV_DMABUF
 	gntdev_dmabuf_fini(priv->dmabuf_priv);
 #endif
 
-	if (use_ptemod)
-		mmu_notifier_unregister(&priv->mn, priv->mm);
-
 	kfree(priv);
 	return 0;
 }
@@ -714,8 +619,6 @@ static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
 	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 	if (map) {
 		list_del(&map->next);
-		if (populate_freeable_maps)
-			list_add_tail(&map->next, &priv->freeable_maps);
 		err = 0;
 	}
 	mutex_unlock(&priv->lock);
@@ -1087,11 +990,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 		goto unlock_out;
 	if (use_ptemod && map->vma)
 		goto unlock_out;
-	if (use_ptemod && priv->mm != vma->vm_mm) {
-		pr_warn("Huh? Other mm?\n");
-		goto unlock_out;
-	}
-
 	refcount_inc(&map->users);
 
 	vma->vm_ops = &gntdev_vmops;
@@ -1102,10 +1000,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 	vma->vm_flags |= VM_DONTCOPY;
 
 	vma->vm_private_data = map;
-
-	if (use_ptemod)
-		map->vma = vma;
-
 	if (map->flags) {
 		if ((vma->vm_flags & VM_WRITE) &&
 		    (map->flags & GNTMAP_readonly))
@@ -1116,8 +1010,28 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 			map->flags |= GNTMAP_readonly;
 	}
 
+	if (use_ptemod) {
+		map->vma = vma;
+		err = mmu_interval_notifier_insert_locked(
+			&map->notifier, vma->vm_mm, vma->vm_start,
+			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
+		if (err)
+			goto out_unlock_put;
+	}
 	mutex_unlock(&priv->lock);
 
+	/*
+	 * gntdev takes the address of the PTE in find_grant_ptes() and passes
+	 * it to the hypervisor in gntdev_map_grant_pages(). The purpose of
+	 * the notifier is to prevent the hypervisor pointer to the PTE from
+	 * going stale.
+	 *
+	 * Since this vma's mappings can't be touched without the mmap_sem,
+	 * and we are holding it now, there is no need for the notifier_range
+	 * locking pattern.
+	 */
+	mmu_interval_read_begin(&map->notifier);
+
 	if (use_ptemod) {
 		map->pages_vm_start = vma->vm_start;
 		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
|
@ -1166,8 +1080,11 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
|
|||
mutex_unlock(&priv->lock);
|
||||
out_put_map:
|
||||
if (use_ptemod) {
|
||||
map->vma = NULL;
|
||||
unmap_grant_pages(map, 0, map->count);
|
||||
if (map->vma) {
|
||||
mmu_interval_notifier_remove(&map->notifier);
|
||||
map->vma = NULL;
|
||||
}
|
||||
}
|
||||
gntdev_put_map(priv, map);
|
||||
return err;
|
||||
|
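The comment added in gntdev_mmap() notes that gntdev can skip the usual
notifier_range locking pattern because it holds mmap_sem across the setup.
For readers unfamiliar with that pattern, a rough sketch of what it normally
looks like follows, continuing the hypothetical my_range example above;
gntdev itself only calls mmu_interval_read_begin() and relies on mmap_sem
instead of the retry loop.

/*
 * Hypothetical user of the notifier registered earlier: retry the setup
 * until no invalidation raced with it.
 */
static int my_range_populate(struct my_range *r)
{
	unsigned long seq;

retry:
	seq = mmu_interval_read_begin(&r->notifier);

	/* (Re)build the mapping here; this side may sleep and fault. */

	spin_lock(&r->lock);
	if (mmu_interval_read_retry(&r->notifier, seq)) {
		/* An invalidate ran concurrently; redo the setup. */
		spin_unlock(&r->lock);
		goto retry;
	}
	/* Commit the mapping while the sequence is known to be stable. */
	spin_unlock(&r->lock);
	return 0;
}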