@@ -474,7 +474,7 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 	}
 
 	spin_lock_irq(&priv->lock);
-	queue_delayed_work(ipoib_workqueue,
+	queue_delayed_work(priv->wq,
 			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	/* Add this entry to passive ids list head, but do not re-add it
 	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
@@ -576,7 +576,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		spin_lock_irqsave(&priv->lock, flags);
 		list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
 		ipoib_cm_start_rx_drain(priv);
-		queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+		queue_work(priv->wq, &priv->cm.rx_reap_task);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	} else
 		ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@@ -603,7 +603,7 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 			spin_lock_irqsave(&priv->lock, flags);
 			list_move(&p->list, &priv->cm.rx_reap_list);
 			spin_unlock_irqrestore(&priv->lock, flags);
-			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
+			queue_work(priv->wq, &priv->cm.rx_reap_task);
 		}
 		return;
 	}
@@ -827,7 +827,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
 		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 			list_move(&tx->list, &priv->cm.reap_list);
-			queue_work(ipoib_workqueue, &priv->cm.reap_task);
+			queue_work(priv->wq, &priv->cm.reap_task);
 		}
 
 		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
@@ -1255,7 +1255,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 
 		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 			list_move(&tx->list, &priv->cm.reap_list);
-			queue_work(ipoib_workqueue, &priv->cm.reap_task);
+			queue_work(priv->wq, &priv->cm.reap_task);
 		}
 
 		spin_unlock_irqrestore(&priv->lock, flags);
@@ -1284,7 +1284,7 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
 	tx->dev = dev;
 	list_add(&tx->list, &priv->cm.start_list);
 	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
-	queue_work(ipoib_workqueue, &priv->cm.start_task);
+	queue_work(priv->wq, &priv->cm.start_task);
 	return tx;
 }
 
@@ -1295,7 +1295,7 @@ void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
 	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
 		spin_lock_irqsave(&priv->lock, flags);
 		list_move(&tx->list, &priv->cm.reap_list);
-		queue_work(ipoib_workqueue, &priv->cm.reap_task);
+		queue_work(priv->wq, &priv->cm.reap_task);
 		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
 			  tx->neigh->daddr + 4);
 		tx->neigh = NULL;
@@ -1417,7 +1417,7 @@ void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
 
 	skb_queue_tail(&priv->cm.skb_queue, skb);
 	if (e)
-		queue_work(ipoib_workqueue, &priv->cm.skb_task);
+		queue_work(priv->wq, &priv->cm.skb_task);
 }
 
 static void ipoib_cm_rx_reap(struct work_struct *work)
@@ -1450,7 +1450,7 @@ static void ipoib_cm_stale_task(struct work_struct *work)
 	}
 
 	if (!list_empty(&priv->cm.passive_ids))
-		queue_delayed_work(ipoib_workqueue,
+		queue_delayed_work(priv->wq,
 				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	spin_unlock_irq(&priv->lock);
 }