@@ -112,12 +112,12 @@ static void ib_cq_poll_work(struct work_struct *work)
 				    IB_POLL_BATCH);
 	if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
 	    ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
-		queue_work(ib_comp_wq, &cq->work);
+		queue_work(cq->comp_wq, &cq->work);
 }
 
 static void ib_cq_completion_workqueue(struct ib_cq *cq, void *private)
 {
-	queue_work(ib_comp_wq, &cq->work);
+	queue_work(cq->comp_wq, &cq->work);
 }
 
 /**
@@ -175,9 +175,12 @@ struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
 		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 		break;
 	case IB_POLL_WORKQUEUE:
+	case IB_POLL_UNBOUND_WORKQUEUE:
 		cq->comp_handler = ib_cq_completion_workqueue;
 		INIT_WORK(&cq->work, ib_cq_poll_work);
 		ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+		cq->comp_wq = (cq->poll_ctx == IB_POLL_WORKQUEUE) ?
+				ib_comp_wq : ib_comp_unbound_wq;
 		break;
 	default:
 		ret = -EINVAL;
@@ -213,6 +216,7 @@ void ib_free_cq(struct ib_cq *cq)
 		irq_poll_disable(&cq->iop);
 		break;
 	case IB_POLL_WORKQUEUE:
+	case IB_POLL_UNBOUND_WORKQUEUE:
 		cancel_work_sync(&cq->work);
 		break;
 	default:
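
For context, a minimal caller sketch (not part of the patch above): assuming the standard ib_alloc_cq() wrapper around __ib_alloc_cq(), and that the rest of this patch creates ib_comp_unbound_wq at module init (e.g. via alloc_workqueue() with WQ_UNBOUND), a ULP opts into unbound completion processing simply by passing the new poll context. The helper name below is hypothetical.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical helper: allocate a CQ whose completion work is queued
 * to the unbound workqueue, so ib_cq_poll_work() may be scheduled on
 * any CPU rather than being bound to the CPU that took the interrupt.
 */
static struct ib_cq *example_create_cq(struct ib_device *dev, void *priv,
				       int nr_cqe, int comp_vector)
{
	/*
	 * With this patch applied, __ib_alloc_cq() sets cq->comp_wq to
	 * ib_comp_unbound_wq for this poll context, so the queue_work()
	 * calls in the diff above target the unbound queue.
	 */
	return ib_alloc_cq(dev, priv, nr_cqe, comp_vector,
			   IB_POLL_UNBOUND_WORKQUEUE);
}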