@@ -3462,6 +3462,17 @@ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 	txq->xmit_lock_owner = cpu;
 }
 
+static inline bool __netif_tx_acquire(struct netdev_queue *txq)
+{
+	__acquire(&txq->_xmit_lock);
+	return true;
+}
+
+static inline void __netif_tx_release(struct netdev_queue *txq)
+{
+	__release(&txq->_xmit_lock);
+}
+
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
@@ -3563,17 +3574,21 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 
 #define HARD_TX_LOCK(dev, txq, cpu) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 		__netif_tx_lock(txq, cpu);		\
+	} else {					\
+		__netif_tx_acquire(txq);		\
 	}						\
 }
 
 #define HARD_TX_TRYLOCK(dev, txq)			\
 	(((dev->features & NETIF_F_LLTX) == 0) ?	\
 		__netif_tx_trylock(txq) :		\
-		true )
+		__netif_tx_acquire(txq))
 
 #define HARD_TX_UNLOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 		__netif_tx_unlock(txq);			\
+	} else {					\
+		__netif_tx_release(txq);		\
 	}						\
 }
 