@@ -55,7 +55,8 @@ static inline void xenvif_stop_queue(struct xenvif_queue *queue)
 
 int xenvif_schedulable(struct xenvif *vif)
 {
-	return netif_running(vif->dev) && netif_carrier_ok(vif->dev);
+	return netif_running(vif->dev) &&
+		test_bit(VIF_STATUS_CONNECTED, &vif->status);
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -77,8 +78,12 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 	/* This vif is rogue, we pretend we've there is nothing to do
 	 * for this vif to deschedule it from NAPI. But this interface
 	 * will be turned off in thread context later.
+	 * Also, if a guest doesn't post enough slots to receive data on one of
+	 * its queues, the carrier goes down and NAPI is descheduled here so
+	 * the guest can't send more packets until it's ready to receive.
 	 */
-	if (unlikely(queue->vif->disabled)) {
+	if (unlikely(queue->vif->disabled ||
+		     !netif_carrier_ok(queue->vif->dev))) {
 		napi_complete(napi);
 		return 0;
 	}
@@ -96,7 +101,16 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif_queue *queue = dev_id;
+	struct netdev_queue *net_queue =
+		netdev_get_tx_queue(queue->vif->dev, queue->id);
 
+	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
+	 * the carrier went down and this queue was previously blocked
+	 */
+	if (unlikely(netif_tx_queue_stopped(net_queue) ||
+		     (!netif_carrier_ok(queue->vif->dev) &&
+		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
@@ -124,16 +138,14 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue and drain it on timeout */
-static void xenvif_wake_queue_callback(unsigned long data)
+/* Callback to wake the queue's thread and turn the carrier off on timeout */
+static void xenvif_rx_stalled(unsigned long data)
 {
 	struct xenvif_queue *queue = (struct xenvif_queue *)data;
 
 	if (xenvif_queue_stopped(queue)) {
-		netdev_err(queue->vif->dev, "draining TX queue\n");
-		queue->rx_queue_purge = true;
+		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 		xenvif_kick_thread(queue);
-		xenvif_wake_queue(queue);
 	}
 }
 
@@ -182,11 +194,11 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * drain.
 	 */
 	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-		queue->wake_queue.function = xenvif_wake_queue_callback;
-		queue->wake_queue.data = (unsigned long)queue;
+		queue->rx_stalled.function = xenvif_rx_stalled;
+		queue->rx_stalled.data = (unsigned long)queue;
 		xenvif_stop_queue(queue);
-		mod_timer(&queue->wake_queue,
-			  jiffies + rx_drain_timeout_jiffies);
+		mod_timer(&queue->rx_stalled,
+			  jiffies + rx_drain_timeout_jiffies);
 	}
 
 	skb_queue_tail(&queue->rx_queue, skb);
@@ -267,7 +279,7 @@ static void xenvif_down(struct xenvif *vif)
 static int xenvif_open(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_up(vif);
 	netif_tx_start_all_queues(dev);
 	return 0;
@@ -276,7 +288,7 @@ static int xenvif_open(struct net_device *dev)
 static int xenvif_close(struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
-	if (netif_carrier_ok(dev))
+	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
 		xenvif_down(vif);
 	netif_tx_stop_all_queues(dev);
 	return 0;
@@ -514,7 +526,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
 	}
 
-	init_timer(&queue->wake_queue);
+	init_timer(&queue->rx_stalled);
 
 	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
 			XENVIF_NAPI_WEIGHT);
@@ -528,6 +540,7 @@ void xenvif_carrier_on(struct xenvif *vif)
 	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
 		dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
+	set_bit(VIF_STATUS_CONNECTED, &vif->status);
 	netif_carrier_on(vif->dev);
 	if (netif_running(vif->dev))
 		xenvif_up(vif);
@@ -625,9 +638,11 @@ void xenvif_carrier_off(struct xenvif *vif)
 	struct net_device *dev = vif->dev;
 
 	rtnl_lock();
-	netif_carrier_off(dev); /* discard queued packets */
-	if (netif_running(dev))
-		xenvif_down(vif);
+	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
+		netif_carrier_off(dev); /* discard queued packets */
+		if (netif_running(dev))
+			xenvif_down(vif);
+	}
 	rtnl_unlock();
 }
 
@@ -656,14 +671,13 @@ void xenvif_disconnect(struct xenvif *vif)
 	unsigned int num_queues = vif->num_queues;
 	unsigned int queue_index;
 
-	if (netif_carrier_ok(vif->dev))
-		xenvif_carrier_off(vif);
+	xenvif_carrier_off(vif);
 
 	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
 		queue = &vif->queues[queue_index];
 
 		if (queue->task) {
-			del_timer_sync(&queue->wake_queue);
+			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
 			queue->task = NULL;
 		}
@@ -705,16 +719,12 @@ void xenvif_free(struct xenvif *vif)
 	/* Here we want to avoid timeout messages if an skb can be legitimately
 	 * stuck somewhere else. Realistically this could be an another vif's
 	 * internal or QDisc queue. That another vif also has this
-	 * rx_drain_timeout_msecs timeout, but the timer only ditches the
-	 * internal queue. After that, the QDisc queue can put in worst case
-	 * XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS skbs into that another vif's
-	 * internal queue, so we need several rounds of such timeouts until we
-	 * can be sure that no another vif should have skb's from us. We are
-	 * not sending more skb's, so newly stuck packets are not interesting
-	 * for us here.
+	 * rx_drain_timeout_msecs timeout, so give it time to drain out.
+	 * Although if that other guest wakes up just before its timeout happens
+	 * and takes only one skb from QDisc, it can hold onto other skbs for a
+	 * longer period.
 	 */
-	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000) *
-		DIV_ROUND_UP(XENVIF_QUEUE_LENGTH, (XEN_NETIF_RX_RING_SIZE / MAX_SKB_FRAGS));
+	unsigned int worst_case_skb_lifetime = (rx_drain_timeout_msecs/1000);
 
 	unregister_netdev(vif->dev);
 
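Note on the simplified worst_case_skb_lifetime in the last hunk: taking the
defaults of this era as an illustrative assumption (rx_drain_timeout_msecs =
10000, XENVIF_QUEUE_LENGTH = 32, XEN_NETIF_RX_RING_SIZE = 256, MAX_SKB_FRAGS
= 17), the removed formula worked out to
(10000/1000) * DIV_ROUND_UP(32, 256/17) = 10 * DIV_ROUND_UP(32, 15) = 30
seconds, while the new expression is simply 10000/1000 = 10 seconds: once a
stalled queue takes the carrier down, a single drain timeout is expected to
bound how long our skbs can sit in another vif's queues, instead of the
several rounds the old comment had to account for.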
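The structural change running through the patch is that "connected" becomes
an atomic status bit (VIF_STATUS_CONNECTED) instead of being inferred from
netif_carrier_ok(), so the carrier can now go down on an RX stall while the
vif stays logically connected, and teardown becomes idempotent:
xenvif_disconnect() may call xenvif_carrier_off() unconditionally because
test_and_clear_bit() lets only one caller perform the down-transition. Below
is a minimal userspace sketch of that idempotency pattern, with C11 atomics
standing in for the kernel's set_bit()/test_and_clear_bit() and all names
invented for illustration:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define VIF_STATUS_CONNECTED 0	/* bit number, as in the patch */

struct vif_state {
	atomic_ulong status;	/* stand-in for vif->status */
};

static void carrier_on(struct vif_state *v)
{
	/* like set_bit(VIF_STATUS_CONNECTED, &vif->status) */
	atomic_fetch_or(&v->status, 1UL << VIF_STATUS_CONNECTED);
}

/* Returns true only for the caller that actually performed the transition,
 * mirroring test_and_clear_bit() in xenvif_carrier_off().
 */
static bool carrier_off(struct vif_state *v)
{
	unsigned long old = atomic_fetch_and(&v->status,
					     ~(1UL << VIF_STATUS_CONNECTED));
	return old & (1UL << VIF_STATUS_CONNECTED);
}

int main(void)
{
	struct vif_state v = { .status = 0 };

	carrier_on(&v);
	/* The first teardown wins; a second call is a harmless no-op. */
	printf("first carrier_off:  %d\n", carrier_off(&v));	/* 1 */
	printf("second carrier_off: %d\n", carrier_off(&v));	/* 0 */
	return 0;
}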