瀏覽代碼

RDMA/umem: Avoid synchronize_srcu in the ODP MR destruction path

synchronize_srcu is slow enough that it should be avoided on the syscall
path when user space is destroying MRs. After all the rework we can now
trivially do this by having call_srcu kfree the per_mm.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Jason Gunthorpe 6 年之前
父節點
當前提交
56ac9dd917
共有 2 個文件被更改，包括 9 次插入、2 次删除
  1. 8 2
      drivers/infiniband/core/umem_odp.c
  2. 1 0
      include/rdma/ib_umem_odp.h

+ 8 - 2
drivers/infiniband/core/umem_odp.c

@@ -307,6 +307,11 @@ found:
 	return 0;
 }
 
+static void free_per_mm(struct rcu_head *rcu)
+{
+	kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
+}
+
 void put_per_mm(struct ib_umem_odp *umem_odp)
 {
 	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
@@ -334,9 +339,10 @@ void put_per_mm(struct ib_umem_odp *umem_odp)
 	per_mm->active = false;
 	up_write(&per_mm->umem_rwsem);
 
-	mmu_notifier_unregister(&per_mm->mn, per_mm->mm);
+	WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree.rb_root));
+	mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
 	put_pid(per_mm->tgid);
-	kfree(per_mm);
+	mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
 }
 
 struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,

+ 1 - 0
include/rdma/ib_umem_odp.h

@@ -99,6 +99,7 @@ struct ib_ucontext_per_mm {
 	unsigned int odp_mrs_count;
 
 	struct list_head ucontext_list;
+	struct rcu_head rcu;
 };
 
 int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);