@@ -57,6 +57,7 @@ static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
 static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
 static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
 static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
+static struct uverbs_lock_class wq_lock_class = { .name = "WQ-uobj" };
 
 /*
  * The ib_uobject locking scheme is as follows:
@@ -243,6 +244,16 @@ static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
 	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
 }
 
+static struct ib_wq *idr_read_wq(int wq_handle, struct ib_ucontext *context)
+{
+	return idr_read_obj(&ib_uverbs_wq_idr, wq_handle, context, 0);
+}
+
+static void put_wq_read(struct ib_wq *wq)
+{
+	put_uobj_read(wq->uobject);
+}
+
 static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
 {
 	struct ib_uobject *uobj;
@@ -326,6 +337,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 	INIT_LIST_HEAD(&ucontext->qp_list);
 	INIT_LIST_HEAD(&ucontext->srq_list);
 	INIT_LIST_HEAD(&ucontext->ah_list);
+	INIT_LIST_HEAD(&ucontext->wq_list);
 	INIT_LIST_HEAD(&ucontext->xrcd_list);
 	INIT_LIST_HEAD(&ucontext->rule_list);
 	rcu_read_lock();
@@ -3056,6 +3068,244 @@ static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
 	return 0;
 }
 
+int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
+			   struct ib_device *ib_dev,
+			   struct ib_udata *ucore,
+			   struct ib_udata *uhw)
+{
+	struct ib_uverbs_ex_create_wq cmd = {};
+	struct ib_uverbs_ex_create_wq_resp resp = {};
+	struct ib_uwq_object *obj;
+	int err = 0;
+	struct ib_cq *cq;
+	struct ib_pd *pd;
+	struct ib_wq *wq;
+	struct ib_wq_init_attr wq_init_attr = {};
+	size_t required_cmd_sz;
+	size_t required_resp_len;
+
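+	/* cmd must cover fields up to max_sge; resp must fit up to wqn. */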
+	required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
+	required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
+
+	if (ucore->inlen < required_cmd_sz)
+		return -EINVAL;
+
+	if (ucore->outlen < required_resp_len)
+		return -ENOSPC;
+
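+	/* Reject unknown (non-zero) trailing bytes beyond the known cmd. */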
+	if (ucore->inlen > sizeof(cmd) &&
+	    !ib_is_udata_cleared(ucore, sizeof(cmd),
+				 ucore->inlen - sizeof(cmd)))
+		return -EOPNOTSUPP;
+
+	err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+	if (err)
+		return err;
+
+	if (cmd.comp_mask)
+		return -EOPNOTSUPP;
+
+	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+	if (!obj)
+		return -ENOMEM;
+
+	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext,
+		  &wq_lock_class);
+	down_write(&obj->uevent.uobject.mutex);
+	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
+	if (!pd) {
+		err = -EINVAL;
+		goto err_uobj;
+	}
+
+	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
+	if (!cq) {
+		err = -EINVAL;
+		goto err_put_pd;
+	}
+
+	wq_init_attr.cq = cq;
+	wq_init_attr.max_sge = cmd.max_sge;
+	wq_init_attr.max_wr = cmd.max_wr;
+	wq_init_attr.wq_context = file;
+	wq_init_attr.wq_type = cmd.wq_type;
+	wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
+	obj->uevent.events_reported = 0;
+ INIT_LIST_HEAD(&obj->uevent.event_list);
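+	/* uhw carries provider-specific data for the driver's create_wq. */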
+	wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
+	if (IS_ERR(wq)) {
+		err = PTR_ERR(wq);
+		goto err_put_cq;
+	}
+
+	wq->uobject = &obj->uevent.uobject;
+	obj->uevent.uobject.object = wq;
+	wq->wq_type = wq_init_attr.wq_type;
+	wq->cq = cq;
+	wq->pd = pd;
+	wq->device = pd->device;
+	wq->wq_context = wq_init_attr.wq_context;
+	atomic_set(&wq->usecnt, 0);
+	atomic_inc(&pd->usecnt);
+	atomic_inc(&cq->usecnt);
+	wq->uobject = &obj->uevent.uobject;
+	obj->uevent.uobject.object = wq;
+	err = idr_add_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
+	if (err)
+		goto destroy_wq;
+
+	memset(&resp, 0, sizeof(resp));
+	resp.wq_handle = obj->uevent.uobject.id;
+	resp.max_sge = wq_init_attr.max_sge;
+	resp.max_wr = wq_init_attr.max_wr;
+	resp.wqn = wq->wq_num;
+	resp.response_length = required_resp_len;
+	err = ib_copy_to_udata(ucore,
+			       &resp, resp.response_length);
+	if (err)
+		goto err_copy;
+
+	put_pd_read(pd);
+	put_cq_read(cq);
+
+	mutex_lock(&file->mutex);
+	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->wq_list);
+	mutex_unlock(&file->mutex);
+
+	obj->uevent.uobject.live = 1;
+	up_write(&obj->uevent.uobject.mutex);
+	return 0;
+
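+	/* Error unwind: undo in reverse order of acquisition. */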
+err_copy:
+	idr_remove_uobj(&ib_uverbs_wq_idr, &obj->uevent.uobject);
+destroy_wq:
+	ib_destroy_wq(wq);
+err_put_cq:
+	put_cq_read(cq);
+err_put_pd:
+	put_pd_read(pd);
+err_uobj:
+	put_uobj_write(&obj->uevent.uobject);
+
+	return err;
+}
+
+int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
+			    struct ib_device *ib_dev,
+			    struct ib_udata *ucore,
+			    struct ib_udata *uhw)
+{
+	struct ib_uverbs_ex_destroy_wq cmd = {};
+	struct ib_uverbs_ex_destroy_wq_resp resp = {};
+	struct ib_wq *wq;
+	struct ib_uobject *uobj;
+	struct ib_uwq_object *obj;
+	size_t required_cmd_sz;
+	size_t required_resp_len;
+	int ret;
+
+	required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
+	required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
+
+	if (ucore->inlen < required_cmd_sz)
+		return -EINVAL;
+
+	if (ucore->outlen < required_resp_len)
+		return -ENOSPC;
+
+	if (ucore->inlen > sizeof(cmd) &&
+	    !ib_is_udata_cleared(ucore, sizeof(cmd),
+				 ucore->inlen - sizeof(cmd)))
+		return -EOPNOTSUPP;
+
+	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+	if (ret)
+		return ret;
+
+	if (cmd.comp_mask)
+		return -EOPNOTSUPP;
+
+ resp.response_length = required_resp_len;
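+	/* Destroying the WQ needs exclusive (write) access to its uobject. */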
+	uobj = idr_write_uobj(&ib_uverbs_wq_idr, cmd.wq_handle,
+			      file->ucontext);
+	if (!uobj)
+		return -EINVAL;
+
+	wq = uobj->object;
+	obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
+	ret = ib_destroy_wq(wq);
+	if (!ret)
+		uobj->live = 0;
+
+	put_uobj_write(uobj);
+	if (ret)
+		return ret;
+
+	idr_remove_uobj(&ib_uverbs_wq_idr, uobj);
+
+	mutex_lock(&file->mutex);
+	list_del(&uobj->list);
+	mutex_unlock(&file->mutex);
+
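+	/* Release pending events and report how many were delivered. */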
+	ib_uverbs_release_uevent(file, &obj->uevent);
+	resp.events_reported = obj->uevent.events_reported;
+	put_uobj(uobj);
+
+	ret = ib_copy_to_udata(ucore, &resp, resp.response_length);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
+			   struct ib_device *ib_dev,
+			   struct ib_udata *ucore,
+			   struct ib_udata *uhw)
+{
+	struct ib_uverbs_ex_modify_wq cmd = {};
+	struct ib_wq *wq;
+	struct ib_wq_attr wq_attr = {};
+	size_t required_cmd_sz;
+	int ret;
+
+	required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
+	if (ucore->inlen < required_cmd_sz)
+		return -EINVAL;
+
+	if (ucore->inlen > sizeof(cmd) &&
+	    !ib_is_udata_cleared(ucore, sizeof(cmd),
+				 ucore->inlen - sizeof(cmd)))
+		return -EOPNOTSUPP;
+
+	ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
+	if (ret)
+		return ret;
+
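+	/* Only the WQ state attributes can be modified here. */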
+	if (!cmd.attr_mask)
+		return -EINVAL;
+
+	if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE))
+		return -EINVAL;
+
+	wq = idr_read_wq(cmd.wq_handle, file->ucontext);
+	if (!wq)
+		return -EINVAL;
+
+	wq_attr.curr_wq_state = cmd.curr_wq_state;
+	wq_attr.wq_state = cmd.wq_state;
+	ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
+	put_wq_read(wq);
+	return ret;
+}
+
 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 			     struct ib_device *ib_dev,
 			     struct ib_udata *ucore,