@@ -731,7 +731,7 @@ static void mem_timer(unsigned long data)
 	struct qib_qp_priv *priv = NULL;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev->pending_lock, flags);
+	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
 	if (!list_empty(list)) {
 		priv = list_entry(list->next, struct qib_qp_priv, iowait);
 		qp = priv->owner;
@@ -740,7 +740,7 @@ static void mem_timer(unsigned long data)
 		if (!list_empty(list))
 			mod_timer(&dev->mem_timer, jiffies + 1);
 	}
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
+	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
 
 	if (qp) {
 		spin_lock_irqsave(&qp->s_lock, flags);
@@ -955,13 +955,13 @@ static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
 	unsigned long flags;
 
 	spin_lock_irqsave(&qp->s_lock, flags);
-	spin_lock(&dev->pending_lock);
+	spin_lock(&dev->rdi.pending_lock);
 
 	if (!list_empty(&dev->txreq_free)) {
 		struct list_head *l = dev->txreq_free.next;
 
 		list_del(l);
-		spin_unlock(&dev->pending_lock);
+		spin_unlock(&dev->rdi.pending_lock);
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
 	} else {
@@ -972,7 +972,7 @@ static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
 			list_add_tail(&priv->iowait, &dev->txwait);
 		}
 		qp->s_flags &= ~QIB_S_BUSY;
-		spin_unlock(&dev->pending_lock);
+		spin_unlock(&dev->rdi.pending_lock);
 		spin_unlock_irqrestore(&qp->s_lock, flags);
 		tx = ERR_PTR(-EBUSY);
 	}
@@ -985,17 +985,17 @@ static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
 	struct qib_verbs_txreq *tx;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev->pending_lock, flags);
+	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
 	/* assume the list non empty */
 	if (likely(!list_empty(&dev->txreq_free))) {
 		struct list_head *l = dev->txreq_free.next;
 
 		list_del(l);
-		spin_unlock_irqrestore(&dev->pending_lock, flags);
+		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
 		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
 	} else {
 		/* call slow path to get the extra lock */
-		spin_unlock_irqrestore(&dev->pending_lock, flags);
+		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
 		tx = __get_txreq(dev, qp);
 	}
 	return tx;
@@ -1025,7 +1025,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
 		kfree(tx->align_buf);
 	}
 
-	spin_lock_irqsave(&dev->pending_lock, flags);
+	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
 
 	/* Put struct back on free list */
 	list_add(&tx->txreq.list, &dev->txreq_free);
@@ -1037,7 +1037,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
 		qp = priv->owner;
 		list_del_init(&priv->iowait);
 		atomic_inc(&qp->refcount);
-		spin_unlock_irqrestore(&dev->pending_lock, flags);
+		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
 
 		spin_lock_irqsave(&qp->s_lock, flags);
 		if (qp->s_flags & QIB_S_WAIT_TX) {
@@ -1049,7 +1049,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx)
 		if (atomic_dec_and_test(&qp->refcount))
 			wake_up(&qp->wait);
 	} else
-		spin_unlock_irqrestore(&dev->pending_lock, flags);
+		spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
 }
 
 /*
@@ -1068,7 +1068,7 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
 
 	n = 0;
 	dev = &ppd->dd->verbs_dev;
-	spin_lock(&dev->pending_lock);
+	spin_lock(&dev->rdi.pending_lock);
 
 	/* Search wait list for first QP wanting DMA descriptors. */
 	list_for_each_entry_safe(qpp, nqpp, &dev->dmawait, iowait) {
@@ -1086,7 +1086,7 @@ void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
 		qps[n++] = qp;
 	}
 
-	spin_unlock(&dev->pending_lock);
+	spin_unlock(&dev->rdi.pending_lock);
 
 	for (i = 0; i < n; i++) {
 		qp = qps[i];
@@ -1147,14 +1147,14 @@ static int wait_kmem(struct qib_ibdev *dev, struct rvt_qp *qp)
 
 	spin_lock_irqsave(&qp->s_lock, flags);
 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
-		spin_lock(&dev->pending_lock);
+		spin_lock(&dev->rdi.pending_lock);
 		if (list_empty(&priv->iowait)) {
 			if (list_empty(&dev->memwait))
 				mod_timer(&dev->mem_timer, jiffies + 1);
 			qp->s_flags |= QIB_S_WAIT_KMEM;
 			list_add_tail(&priv->iowait, &dev->memwait);
 		}
-		spin_unlock(&dev->pending_lock);
+		spin_unlock(&dev->rdi.pending_lock);
 		qp->s_flags &= ~QIB_S_BUSY;
 		ret = -EBUSY;
 	}
@@ -1284,7 +1284,7 @@ static int no_bufs_available(struct rvt_qp *qp)
 	 */
 	spin_lock_irqsave(&qp->s_lock, flags);
 	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
-		spin_lock(&dev->pending_lock);
+		spin_lock(&dev->rdi.pending_lock);
 		if (list_empty(&priv->iowait)) {
 			dev->n_piowait++;
 			qp->s_flags |= QIB_S_WAIT_PIO;
@@ -1292,7 +1292,7 @@ static int no_bufs_available(struct rvt_qp *qp)
 			dd = dd_from_dev(dev);
 			dd->f_wantpiobuf_intr(dd, 1);
 		}
-		spin_unlock(&dev->pending_lock);
+		spin_unlock(&dev->rdi.pending_lock);
 		qp->s_flags &= ~QIB_S_BUSY;
 		ret = -EBUSY;
 	}
@@ -1556,7 +1556,7 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
 	 * could end up with QPs on the wait list with the interrupt
 	 * disabled.
 	 */
-	spin_lock_irqsave(&dev->pending_lock, flags);
+	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
 	while (!list_empty(list)) {
 		if (n == ARRAY_SIZE(qps))
 			goto full;
@@ -1568,7 +1568,7 @@ void qib_ib_piobufavail(struct qib_devdata *dd)
 	}
 	dd->f_wantpiobuf_intr(dd, 0);
 full:
-	spin_unlock_irqrestore(&dev->pending_lock, flags);
+	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
 
 	for (i = 0; i < n; i++) {
 		qp = qps[i];
@@ -1992,10 +1992,6 @@ int qib_register_ib_device(struct qib_devdata *dd)
 
 	qib_init_qpn_table(dd, &dev->qpn_table);
 
-	INIT_LIST_HEAD(&dev->pending_mmaps);
-	spin_lock_init(&dev->pending_lock);
-	dev->mmap_offset = PAGE_SIZE;
-	spin_lock_init(&dev->mmap_offset_lock);
 	INIT_LIST_HEAD(&dev->piowait);
 	INIT_LIST_HEAD(&dev->dmawait);
 	INIT_LIST_HEAD(&dev->txwait);
@@ -2115,7 +2111,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
 	ibdev->attach_mcast = qib_multicast_attach;
 	ibdev->detach_mcast = qib_multicast_detach;
 	ibdev->process_mad = qib_process_mad;
-	ibdev->mmap = qib_mmap;
+	ibdev->mmap = NULL;
 	ibdev->dma_ops = NULL;
 	ibdev->get_port_immutable = qib_port_immutable;
 
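
Note: every hunk above applies the same mechanical substitution. The qib driver
stops taking its private dev->pending_lock and instead takes the pending_lock
embedded in the shared rdmavt device info (dev->rdi), and the driver-private
mmap bookkeeping (pending_mmaps, mmap_offset, and their locks) is dropped along
with the driver's mmap entry point, since rdmavt owns that state now. Below is
a minimal sketch of the resulting access pattern, not code from the patch: the
struct layouts are abbreviated, example_flush_waiters() is a hypothetical
helper, and the assumption is that rdmavt initializes rdi.pending_lock during
device registration.

/*
 * Sketch only: qib_ibdev embeds struct rvt_dev_info as "rdi", so the
 * wait-list lock is reached through the embedded member rather than a
 * driver-private field.  Both structs are abbreviated for illustration.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct rvt_dev_info {
	spinlock_t pending_lock;	/* owned and initialized by rdmavt */
	/* ... mmap bookkeeping also lives here ... */
};

struct qib_ibdev {
	struct rvt_dev_info rdi;	/* shared rdmavt state */
	struct list_head piowait;	/* QPs waiting for PIO buffers */
	/* ... the old pending_lock, pending_mmaps, mmap_offset, and
	 * mmap_offset_lock members are gone ... */
};

/* Hypothetical helper showing the new locking idiom at every call site. */
static void example_flush_waiters(struct qib_ibdev *dev)
{
	unsigned long flags;

	/* Before: spin_lock_irqsave(&dev->pending_lock, flags); */
	spin_lock_irqsave(&dev->rdi.pending_lock, flags);
	/* ... walk dev->piowait (or dmawait/memwait/txwait) here ... */
	spin_unlock_irqrestore(&dev->rdi.pending_lock, flags);
}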