@@ -360,9 +360,10 @@ static int __must_check remove_commit_idr_uobject(struct ib_uobject *uobj,
 
 	/*
	 * We can only fail gracefully if the user requested to destroy the
-	 * object. In the rest of the cases, just remove whatever you can.
+	 * object, or when the destroy may be retried after an error.
+	 * In the rest of the cases, just remove whatever you can.
	 */
-	if (why == RDMA_REMOVE_DESTROY && ret)
+	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;
 
	ib_rdmacg_uncharge(&uobj->cg_obj, uobj->context->device,
@@ -393,7 +394,7 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
		container_of(uobj, struct ib_uobject_file, uobj);
	int ret = fd_type->context_closed(uobj_file, why);
 
-	if (why == RDMA_REMOVE_DESTROY && ret)
+	if (ib_is_destroy_retryable(ret, why, uobj))
		return ret;
 
	if (why == RDMA_REMOVE_DURING_CLEANUP) {
@@ -422,7 +423,7 @@ static int __must_check _rdma_remove_commit_uobject(struct ib_uobject *uobj,
	struct ib_ucontext *ucontext = uobj->context;
 
	ret = uobj->type->type_class->remove_commit(uobj, why);
-	if (ret && why == RDMA_REMOVE_DESTROY) {
+	if (ib_is_destroy_retryable(ret, why, uobj)) {
		/* We couldn't remove the object, so just unlock the uobject */
		atomic_set(&uobj->usecnt, 0);
		uobj->type->type_class->lookup_put(uobj, true);
@@ -645,61 +646,77 @@ void uverbs_close_fd(struct file *f)
	kref_put(uverbs_file_ref, ib_uverbs_release_file);
 }
 
-void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
+static int __uverbs_cleanup_ucontext(struct ib_ucontext *ucontext,
+				     enum rdma_remove_reason reason)
 {
-	enum rdma_remove_reason reason = device_removed ?
-		RDMA_REMOVE_DRIVER_REMOVE : RDMA_REMOVE_CLOSE;
-	unsigned int cur_order = 0;
+	struct ib_uobject *obj, *next_obj;
+	int ret = -EINVAL;
+	int err = 0;
 
+	/*
+	 * This shouldn't run while executing other commands on this
+	 * context. Thus, the only thing we should take care of is
+	 * releasing a FD while traversing this list. The FD could be
+	 * closed and released from the _release fop of this FD.
+	 * In order to mitigate this, we add a lock.
+	 * We take and release the lock per traversal in order to give
+	 * other threads (which might still use the FDs) a chance to run.
+	 */
+	mutex_lock(&ucontext->uobjects_lock);
	ucontext->cleanup_reason = reason;
+	list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects, list) {
+		/*
+		 * if we hit this WARN_ON, that means we are
+		 * racing with a lookup_get.
+		 */
+		WARN_ON(uverbs_try_lock_object(obj, true));
+		err = obj->type->type_class->remove_commit(obj, reason);
+
+		if (ib_is_destroy_retryable(err, reason, obj)) {
+			pr_debug("ib_uverbs: failed to remove uobject id %d err %d\n",
+				 obj->id, err);
+			atomic_set(&obj->usecnt, 0);
+			continue;
+		}
+
+		if (err)
+			pr_err("ib_uverbs: unable to remove uobject id %d err %d\n",
+			       obj->id, err);
+
+		list_del(&obj->list);
+		/* put the ref we took when we created the object */
+		uverbs_uobject_put(obj);
+		ret = 0;
+	}
+	mutex_unlock(&ucontext->uobjects_lock);
+	return ret;
+}
+
+void uverbs_cleanup_ucontext(struct ib_ucontext *ucontext, bool device_removed)
+{
+	enum rdma_remove_reason reason = device_removed ?
+					 RDMA_REMOVE_DRIVER_REMOVE :
+					 RDMA_REMOVE_CLOSE;
	/*
	 * Waits for all remove_commit and alloc_commit to finish. Logically, We
	 * want to hold this forever as the context is going to be destroyed,
	 * but we'll release it since it causes a "held lock freed" BUG message.
	 */
	down_write(&ucontext->cleanup_rwsem);
+	ucontext->cleanup_retryable = true;
+	while (!list_empty(&ucontext->uobjects))
+		if (__uverbs_cleanup_ucontext(ucontext, reason)) {
+			/*
+			 * No entry was cleaned up successfully during this
+			 * iteration.
+			 */
+			break;
+		}
 
-	while (!list_empty(&ucontext->uobjects)) {
-		struct ib_uobject *obj, *next_obj;
-		unsigned int next_order = UINT_MAX;
+	ucontext->cleanup_retryable = false;
+	if (!list_empty(&ucontext->uobjects))
+		__uverbs_cleanup_ucontext(ucontext, reason);
 
-		/*
-		 * This shouldn't run while executing other commands on this
-		 * context. Thus, the only thing we should take care of is
-		 * releasing a FD while traversing this list. The FD could be
-		 * closed and released from the _release fop of this FD.
-		 * In order to mitigate this, we add a lock.
-		 * We take and release the lock per order traversal in order
-		 * to let other threads (which might still use the FDs) chance
-		 * to run.
-		 */
-		mutex_lock(&ucontext->uobjects_lock);
-		list_for_each_entry_safe(obj, next_obj, &ucontext->uobjects,
-					 list) {
-			if (obj->type->destroy_order == cur_order) {
-				int ret;
-
-				/*
-				 * if we hit this WARN_ON, that means we are
-				 * racing with a lookup_get.
-				 */
-				WARN_ON(uverbs_try_lock_object(obj, true));
-				ret = obj->type->type_class->remove_commit(obj,
-									   reason);
-				list_del(&obj->list);
-				if (ret)
-					pr_warn("ib_uverbs: failed to remove uobject id %d order %u\n",
-						obj->id, cur_order);
-				/* put the ref we took when we created the object */
-				uverbs_uobject_put(obj);
-			} else {
-				next_order = min(next_order,
-						 obj->type->destroy_order);
-			}
-		}
-		mutex_unlock(&ucontext->uobjects_lock);
-		cur_order = next_order;
-	}
	up_write(&ucontext->cleanup_rwsem);
 }
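
The predicate ib_is_destroy_retryable() and the ucontext->cleanup_retryable flag used above are introduced by the same patch outside the hunks quoted here (in the ib_verbs header rather than rdma_core.c). As a minimal sketch of the semantics these hunks assume, hedged since the header change itself is not shown in this excerpt, the helper would look roughly like:

	/*
	 * Sketch, not part of the quoted hunks: a failed destroy is worth
	 * retrying either when userspace explicitly asked to destroy the
	 * object (it can simply issue the call again), or while context
	 * cleanup is still in its first, retryable pass.
	 */
	static inline bool ib_is_destroy_retryable(int ret,
						   enum rdma_remove_reason why,
						   struct ib_uobject *uobj)
	{
		return ret && (why == RDMA_REMOVE_DESTROY ||
			       uobj->context->cleanup_retryable);
	}

Read together with the new uverbs_cleanup_ucontext() above: the first pass sets cleanup_retryable and repeatedly calls __uverbs_cleanup_ucontext() until one full iteration removes nothing (the helper returns its initial -EINVAL, having never reached ret = 0), so destruction ordering is discovered by retrying instead of being encoded in the old fixed destroy_order values; the second pass then clears the flag and removes whatever remains unconditionally.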