@@ -819,10 +819,6 @@ struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
 	root_lock = qdisc_lock(oqdisc);
 	spin_lock_bh(root_lock);
 
-	/* Prune old scheduler */
-	if (oqdisc && refcount_read(&oqdisc->refcnt) <= 1)
-		qdisc_reset(oqdisc);
-
 	/* ... and graft new one */
 	if (qdisc == NULL)
 		qdisc = &noop_qdisc;
@@ -977,6 +973,16 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 	return false;
 }
 
+static void dev_qdisc_reset(struct net_device *dev,
+			    struct netdev_queue *dev_queue,
+			    void *none)
+{
+	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
+
+	if (qdisc)
+		qdisc_reset(qdisc);
+}
+
 /**
  * dev_deactivate_many - deactivate transmissions on several devices
  * @head: list of devices to deactivate
@@ -987,7 +993,6 @@ static bool some_qdisc_is_busy(struct net_device *dev)
 void dev_deactivate_many(struct list_head *head)
 {
 	struct net_device *dev;
-	bool sync_needed = false;
 
 	list_for_each_entry(dev, head, close_list) {
 		netdev_for_each_tx_queue(dev, dev_deactivate_queue,
@@ -997,20 +1002,25 @@ void dev_deactivate_many(struct list_head *head)
 					     &noop_qdisc);
 
 		dev_watchdog_down(dev);
-		sync_needed |= !dev->dismantle;
 	}
 
 	/* Wait for outstanding qdisc-less dev_queue_xmit calls.
 	 * This is avoided if all devices are in dismantle phase :
 	 * Caller will call synchronize_net() for us
 	 */
-	if (sync_needed)
-		synchronize_net();
+	synchronize_net();
 
 	/* Wait for outstanding qdisc_run calls. */
-	list_for_each_entry(dev, head, close_list)
+	list_for_each_entry(dev, head, close_list) {
 		while (some_qdisc_is_busy(dev))
 			yield();
+		/* The new qdisc is assigned at this point so we can safely
+		 * unwind stale skb lists and qdisc statistics
+		 */
+		netdev_for_each_tx_queue(dev, dev_qdisc_reset, NULL);
+		if (dev_ingress_queue(dev))
+			dev_qdisc_reset(dev, dev_ingress_queue(dev), NULL);
+	}
 }
 
 void dev_deactivate(struct net_device *dev)
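
A note on the ordering this patch enforces (an editor's sketch, not part of the submission): qdisc_reset() may only run once no CPU can still be executing the old qdisc, so the reset moves out of dev_graft_qdisc() to after the now-unconditional synchronize_net() and the some_qdisc_is_busy() wait in dev_deactivate_many(). The userspace C model below illustrates that two-phase teardown; every name in it (fake_qdisc, sender, active, and so on) is an illustrative stand-in, not a kernel API.

/* Userspace model of the two-phase teardown (illustrative only,
 * not kernel code): swap in a no-op handler first, wait for every
 * in-flight sender to drain, and only then reset the old state.
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_qdisc {
	atomic_int busy;	/* models some_qdisc_is_busy() */
	int qlen;		/* state a qdisc_reset() would unwind */
};

static struct fake_qdisc old_q = { .qlen = 42 };
static struct fake_qdisc noop_q;
static _Atomic(struct fake_qdisc *) active = &old_q;

static void *sender(void *unused)
{
	struct fake_qdisc *q = atomic_load(&active);

	(void)unused;
	atomic_store(&q->busy, 1);	/* enter qdisc_run() */
	q->qlen++;			/* touch qdisc state */
	atomic_store(&q->busy, 0);	/* leave qdisc_run() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, sender, NULL);

	/* Phase 1: divert new senders (dev_deactivate_queue). */
	atomic_store(&active, &noop_q);

	/* Phase 2: wait out in-flight senders, like synchronize_net()
	 * plus the some_qdisc_is_busy() loop, and only *then* reset.
	 * Resetting before this point is the race the patch removes.
	 */
	pthread_join(t, NULL);
	while (atomic_load(&old_q.busy))
		sched_yield();
	old_q.qlen = 0;			/* now a safe "qdisc_reset()" */

	printf("old qlen after reset: %d\n", old_q.qlen);
	return 0;
}

Build with cc -pthread. The model mirrors the patch's reasoning: once all senders have been diverted to the no-op handler and the drain has completed, no one can still reference the old queue, so unwinding its skb lists and statistics is safe.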