RDMA/umem: Avoid synchronize_srcu in the ODP MR destruction path

synchronize_srcu is slow enough that it should be avoided on the syscall
path when user space is destroying MRs. After all the rework we can now
trivially do this by having call_srcu kfree the per_mm.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
Jason Gunthorpe 2018-09-16 20:48:11 +03:00 committed by Doug Ledford
parent be7a57b41a
commit 56ac9dd917
2 changed files with 9 additions and 2 deletions

View File

@ -307,6 +307,11 @@ static int get_per_mm(struct ib_umem_odp *umem_odp)
return 0;
}
/* SRCU callback: free the per_mm once all readers have drained. */
static void free_per_mm(struct rcu_head *rcu)
{
	struct ib_ucontext_per_mm *per_mm;

	per_mm = container_of(rcu, struct ib_ucontext_per_mm, rcu);
	kfree(per_mm);
}
void put_per_mm(struct ib_umem_odp *umem_odp)
{
struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
@ -334,9 +339,10 @@ void put_per_mm(struct ib_umem_odp *umem_odp)
per_mm->active = false;
up_write(&per_mm->umem_rwsem);
mmu_notifier_unregister(&per_mm->mn, per_mm->mm);
WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
put_pid(per_mm->tgid);
kfree(per_mm);
mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
}
struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,

View File

@ -99,6 +99,7 @@ struct ib_ucontext_per_mm {
unsigned int odp_mrs_count;
struct list_head ucontext_list;
struct rcu_head rcu;
};
int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);