@@ -105,15 +105,21 @@ static void queue_process(struct work_struct *work)
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
 		struct netdev_queue *txq;
+		unsigned int q_index;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
 			kfree_skb(skb);
 			continue;
 		}
 
-		txq = skb_get_tx_queue(dev, skb);
-
 		local_irq_save(flags);
+		/* check if skb->queue_mapping is still valid */
+		q_index = skb_get_queue_mapping(skb);
+		if (unlikely(q_index >= dev->real_num_tx_queues)) {
+			q_index = q_index % dev->real_num_tx_queues;
+			skb_set_queue_mapping(skb, q_index);
+		}
+		txq = netdev_get_tx_queue(dev, q_index);
 		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
 		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
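
For reference, the remap the hunk adds is just a bounds check plus a
modulo: if the device has shrunk its tx queue set since the skb was
queued, the stale queue_mapping is folded back into the valid range
before the tx queue is looked up. Below is a minimal user-space sketch
of that logic; fake_skb and clamp_queue_index are hypothetical
stand-ins for illustration, not kernel types or helpers.

#include <stdio.h>

/* Hypothetical stand-in for the skb field that the kernel helpers
 * skb_get_queue_mapping()/skb_set_queue_mapping() read and write.
 */
struct fake_skb {
	unsigned int queue_mapping;
};

/* Remap a stale queue index into [0, real_num_tx_queues), mirroring
 * the check added to queue_process() above. Assumes
 * real_num_tx_queues >= 1, as it is for a running device.
 */
static unsigned int clamp_queue_index(struct fake_skb *skb,
				      unsigned int real_num_tx_queues)
{
	unsigned int q_index = skb->queue_mapping;

	if (q_index >= real_num_tx_queues) {
		q_index = q_index % real_num_tx_queues;
		skb->queue_mapping = q_index;
	}
	return q_index;
}

int main(void)
{
	/* skb queued while the device still had 8 tx queues... */
	struct fake_skb skb = { .queue_mapping = 6 };

	/* ...but the driver has since shrunk to 4 queues. */
	printf("remapped to queue %u\n", clamp_queue_index(&skb, 4));
	return 0;
}

With 4 remaining queues, the stale mapping of 6 remaps to 6 % 4 = 2,
so the skb is still transmitted on a queue that actually exists
instead of indexing past the end of the device's tx queue array.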