@@ -119,18 +119,17 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
 	if (cq->notify == IB_CQ_NEXT_COMP ||
 	    (cq->notify == IB_CQ_SOLICITED &&
 	     (solicited || entry->status != IB_WC_SUCCESS))) {
-		struct kthread_worker *worker;
 		/*
 		 * This will cause send_complete() to be called in
 		 * another thread.
 		 */
-		smp_read_barrier_depends(); /* see rvt_cq_exit */
-		worker = cq->rdi->worker;
-		if (likely(worker)) {
+		spin_lock(&cq->rdi->n_cqs_lock);
+		if (likely(cq->rdi->worker)) {
			cq->notify = RVT_CQ_NONE;
			cq->triggered++;
-			kthread_queue_work(worker, &cq->comptask);
+			kthread_queue_work(cq->rdi->worker, &cq->comptask);
 		}
+		spin_unlock(&cq->rdi->n_cqs_lock);
 	}
 
 	spin_unlock_irqrestore(&cq->lock, flags);
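The hunk above replaces the lockless worker lookup (the smp_read_barrier_depends() that paired with the smp_wmb() in rvt_cq_exit()) with a plain acquisition of rdi->n_cqs_lock: the NULL check and kthread_queue_work() now happen atomically with respect to rvt_cq_exit() clearing the pointer. A plain spin_lock() suffices here because rvt_cq_enter() already runs with interrupts disabled under cq->lock. A minimal userspace sketch of the same pattern follows; names such as g_worker, g_lock, enqueue() and teardown() are illustrative, not rdmavt code:

/*
 * Sketch of the pattern above -- not rdmavt code.  The shared worker
 * pointer is only dereferenced under a lock, so the teardown path can
 * NULL it and know that no new work gets queued once the lock drops.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct worker {
	pthread_t thread;
};

static struct worker *g_worker;
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

static void *worker_fn(void *arg)
{
	(void)arg;
	return NULL;		/* stand-in for the kthread worker loop */
}

/* cf. rvt_cq_enter(): queue work only while the worker still exists */
static int enqueue(void)
{
	int queued = 0;

	pthread_mutex_lock(&g_lock);
	if (g_worker)
		queued = 1;	/* kthread_queue_work() would go here */
	pthread_mutex_unlock(&g_lock);
	return queued;
}

/* cf. rvt_cq_exit(): revoke under the lock, tear down outside it */
static void teardown(void)
{
	struct worker *worker;

	pthread_mutex_lock(&g_lock);
	worker = g_worker;
	g_worker = NULL;	/* blocks future enqueue() */
	pthread_mutex_unlock(&g_lock);

	if (!worker)
		return;
	pthread_join(worker->thread, NULL);	/* "flush" outside the lock */
	free(worker);
}

int main(void)
{
	g_worker = calloc(1, sizeof(*g_worker));
	pthread_create(&g_worker->thread, NULL, worker_fn, NULL);
	printf("queued: %d\n", enqueue());
	teardown();
	printf("queued after teardown: %d\n", enqueue());
	return 0;
}
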
@@ -240,15 +239,15 @@ struct ib_cq *rvt_create_cq(struct ib_device *ibdev,
 		}
 	}
 
-	spin_lock(&rdi->n_cqs_lock);
+	spin_lock_irq(&rdi->n_cqs_lock);
 	if (rdi->n_cqs_allocated == rdi->dparms.props.max_cq) {
-		spin_unlock(&rdi->n_cqs_lock);
+		spin_unlock_irq(&rdi->n_cqs_lock);
 		ret = ERR_PTR(-ENOMEM);
 		goto bail_ip;
 	}
 
 	rdi->n_cqs_allocated++;
-	spin_unlock(&rdi->n_cqs_lock);
+	spin_unlock_irq(&rdi->n_cqs_lock);
 
 	if (cq->ip) {
 		spin_lock_irq(&rdi->pending_lock);
@@ -296,9 +295,9 @@ int rvt_destroy_cq(struct ib_cq *ibcq)
 	struct rvt_dev_info *rdi = cq->rdi;
 
 	kthread_flush_work(&cq->comptask);
-	spin_lock(&rdi->n_cqs_lock);
+	spin_lock_irq(&rdi->n_cqs_lock);
 	rdi->n_cqs_allocated--;
-	spin_unlock(&rdi->n_cqs_lock);
+	spin_unlock_irq(&rdi->n_cqs_lock);
 	if (cq->ip)
 		kref_put(&cq->ip->ref, rvt_release_mmap_info);
 	else
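The spin_lock() to spin_lock_irq() conversions in the two hunks above follow from the first hunk: rvt_cq_enter() can run in interrupt context and now takes n_cqs_lock nested inside cq->lock, which is held with interrupts disabled. If rvt_create_cq() or rvt_destroy_cq() held n_cqs_lock with interrupts enabled, a completion interrupt arriving on the same CPU could re-enter the lock through rvt_cq_enter() and spin forever. A rough, runnable userspace analog of that rule is sketched below, with signal blocking standing in for local interrupt disabling; all names are illustrative, not rdmavt code:

/*
 * Analog of spin_lock() vs. spin_lock_irq() -- illustrative only.
 * Blocking signals while holding the mutex plays the role of
 * disabling interrupts while holding a spinlock: an async context
 * (signal handler / hardirq) that wants the same lock can never
 * preempt the critical section on this thread.
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int n_cqs_allocated;	/* cf. rdi->n_cqs_allocated */

static void lock_irq(sigset_t *saved)
{
	sigset_t all;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, saved);	/* "irq off" */
	pthread_mutex_lock(&lock);
}

static void unlock_irq(const sigset_t *saved)
{
	pthread_mutex_unlock(&lock);
	pthread_sigmask(SIG_SETMASK, saved, NULL);	/* "irq on" */
}

int main(void)
{
	sigset_t saved;

	lock_irq(&saved);	/* cf. spin_lock_irq(&rdi->n_cqs_lock) */
	n_cqs_allocated++;
	unlock_irq(&saved);	/* cf. spin_unlock_irq(&rdi->n_cqs_lock) */
	printf("n_cqs_allocated = %u\n", n_cqs_allocated);
	return 0;
}
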
@@ -541,12 +540,16 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
 {
 	struct kthread_worker *worker;
 
-	worker = rdi->worker;
-	if (!worker)
+	/* block future queuing from send_complete() */
+	spin_lock_irq(&rdi->n_cqs_lock);
+	if (!rdi->worker) {
+		spin_unlock_irq(&rdi->n_cqs_lock);
 		return;
-	/* blocks future queuing from send_complete() */
+	}
+	worker = rdi->worker;
 	rdi->worker = NULL;
-	smp_wmb(); /* See rdi_cq_enter */
+	spin_unlock_irq(&rdi->n_cqs_lock);
+
 	kthread_flush_worker(worker);
 	kthread_stop(worker->task);
 	kfree(worker);
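Note the local snapshot `worker = rdi->worker;` taken under the lock: without it, the later kthread_flush_worker(worker) would read an uninitialized pointer, since rdi->worker has already been cleared. The snapshot must happen before the pointer is NULLed, and the flush must happen after the unlock, because kthread_flush_worker() sleeps and must not be called with a spinlock held and interrupts off. Clearing rdi->worker first guarantees rvt_cq_enter() queues no new work after the unlock, so the flush drains everything. For reference, rvt_cq_exit() as it reads with the hunk applied:

void rvt_cq_exit(struct rvt_dev_info *rdi)
{
	struct kthread_worker *worker;

	/* block future queuing from send_complete() */
	spin_lock_irq(&rdi->n_cqs_lock);
	if (!rdi->worker) {
		spin_unlock_irq(&rdi->n_cqs_lock);
		return;
	}
	worker = rdi->worker;
	rdi->worker = NULL;
	spin_unlock_irq(&rdi->n_cqs_lock);

	kthread_flush_worker(worker);
	kthread_stop(worker->task);
	kfree(worker);
}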