@@ -33,9 +33,10 @@
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
 
-static bool csum = true, gso = true;
+static bool csum = true, gso = true, napi_tx;
 module_param(csum, bool, 0444);
 module_param(gso, bool, 0444);
+module_param(napi_tx, bool, 0644);
 
 /* FIXME: MTU in config. */
 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
@@ -86,6 +87,8 @@ struct send_queue {
 
 	/* Name of the send queue: output.$index */
 	char name[40];
+
+	struct napi_struct napi;
 };
 
 /* Internal representation of a receive virtqueue */
@@ -239,15 +242,39 @@ static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
 	return p;
 }
 
+static void virtqueue_napi_schedule(struct napi_struct *napi,
+				    struct virtqueue *vq)
+{
+	if (napi_schedule_prep(napi)) {
+		virtqueue_disable_cb(vq);
+		__napi_schedule(napi);
+	}
+}
+
+static void virtqueue_napi_complete(struct napi_struct *napi,
+				    struct virtqueue *vq, int processed)
+{
+	int opaque;
+
+	opaque = virtqueue_enable_cb_prepare(vq);
+	if (napi_complete_done(napi, processed) &&
+	    unlikely(virtqueue_poll(vq, opaque)))
+		virtqueue_napi_schedule(napi, vq);
+}
+
 static void skb_xmit_done(struct virtqueue *vq)
 {
 	struct virtnet_info *vi = vq->vdev->priv;
+	struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
 
 	/* Suppress further interrupts. */
 	virtqueue_disable_cb(vq);
 
-	/* We were probably waiting for more output buffers. */
-	netif_wake_subqueue(vi->dev, vq2txq(vq));
+	if (napi->weight)
+		virtqueue_napi_schedule(napi, vq);
+	else
+		/* We were probably waiting for more output buffers. */
+		netif_wake_subqueue(vi->dev, vq2txq(vq));
 }
 
 static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
@@ -936,27 +963,38 @@ static void skb_recv_done(struct virtqueue *rvq)
 	struct virtnet_info *vi = rvq->vdev->priv;
 	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
 
-	/* Schedule NAPI, Suppress further interrupts if successful. */
-	if (napi_schedule_prep(&rq->napi)) {
-		virtqueue_disable_cb(rvq);
-		__napi_schedule(&rq->napi);
-	}
+	virtqueue_napi_schedule(&rq->napi, rvq);
 }
 
-static void virtnet_napi_enable(struct receive_queue *rq)
+static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
 {
-	napi_enable(&rq->napi);
+	napi_enable(napi);
 
 	/* If all buffers were filled by other side before we napi_enabled, we
-	 * won't get another interrupt, so process any outstanding packets
-	 * now. virtnet_poll wants re-enable the queue, so we disable here.
-	 * We synchronize against interrupts via NAPI_STATE_SCHED */
-	if (napi_schedule_prep(&rq->napi)) {
-		virtqueue_disable_cb(rq->vq);
-		local_bh_disable();
-		__napi_schedule(&rq->napi);
-		local_bh_enable();
+	 * won't get another interrupt, so process any outstanding packets now.
+	 * Call local_bh_enable after to trigger softIRQ processing.
+	 */
+	local_bh_disable();
+	virtqueue_napi_schedule(napi, vq);
+	local_bh_enable();
+}
+
+static void virtnet_napi_tx_enable(struct virtnet_info *vi,
+				   struct virtqueue *vq,
+				   struct napi_struct *napi)
+{
+	if (!napi->weight)
+		return;
+
+	/* Tx napi touches cachelines on the cpu handling tx interrupts. Only
+	 * enable the feature if this is likely affine with the transmit path.
+	 */
+	if (!vi->affinity_hint_set) {
+		napi->weight = 0;
+		return;
 	}
+
+	return virtnet_napi_enable(vq, napi);
 }
 
 static void refill_work(struct work_struct *work)
@@ -971,7 +1009,7 @@ static void refill_work(struct work_struct *work)
 
 		napi_disable(&rq->napi);
 		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
-		virtnet_napi_enable(rq);
+		virtnet_napi_enable(rq->vq, &rq->napi);
 
 		/* In theory, this can happen: if we don't get any buffers in
 		 * we will *never* try to fill again.
@@ -1007,25 +1045,68 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
 	return received;
 }
 
+static void free_old_xmit_skbs(struct send_queue *sq)
+{
+	struct sk_buff *skb;
+	unsigned int len;
+	struct virtnet_info *vi = sq->vq->vdev->priv;
+	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
+	unsigned int packets = 0;
+	unsigned int bytes = 0;
+
+	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+		pr_debug("Sent skb %p\n", skb);
+
+		bytes += skb->len;
+		packets++;
+
+		dev_kfree_skb_any(skb);
+	}
+
+	/* Avoid overhead when no packets have been processed
+	 * happens when called speculatively from start_xmit.
+	 */
+	if (!packets)
+		return;
+
+	u64_stats_update_begin(&stats->tx_syncp);
+	stats->tx_bytes += bytes;
+	stats->tx_packets += packets;
+	u64_stats_update_end(&stats->tx_syncp);
+}
+
+static void virtnet_poll_cleantx(struct receive_queue *rq)
+{
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	unsigned int index = vq2rxq(rq->vq);
+	struct send_queue *sq = &vi->sq[index];
+	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
+
+	if (!sq->napi.weight)
+		return;
+
+	if (__netif_tx_trylock(txq)) {
+		free_old_xmit_skbs(sq);
+		__netif_tx_unlock(txq);
+	}
+
+	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+		netif_tx_wake_queue(txq);
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct receive_queue *rq =
 		container_of(napi, struct receive_queue, napi);
-	unsigned int r, received;
+	unsigned int received;
+
+	virtnet_poll_cleantx(rq);
 
 	received = virtnet_receive(rq, budget);
 
 	/* Out of packets? */
-	if (received < budget) {
-		r = virtqueue_enable_cb_prepare(rq->vq);
-		if (napi_complete_done(napi, received)) {
-			if (unlikely(virtqueue_poll(rq->vq, r)) &&
-			    napi_schedule_prep(napi)) {
-				virtqueue_disable_cb(rq->vq);
-				__napi_schedule(napi);
-			}
-		}
-	}
+	if (received < budget)
+		virtqueue_napi_complete(napi, rq->vq, received);
 
 	return received;
 }
@@ -1040,40 +1121,29 @@ static int virtnet_open(struct net_device *dev)
 		/* Make sure we have some buffers: if oom use wq. */
 		if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 			schedule_delayed_work(&vi->refill, 0);
-		virtnet_napi_enable(&vi->rq[i]);
+		virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+		virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
 	}
 
 	return 0;
 }
 
-static void free_old_xmit_skbs(struct send_queue *sq)
+static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 {
-	struct sk_buff *skb;
-	unsigned int len;
+	struct send_queue *sq = container_of(napi, struct send_queue, napi);
 	struct virtnet_info *vi = sq->vq->vdev->priv;
-	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
-	unsigned int packets = 0;
-	unsigned int bytes = 0;
-
-	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		pr_debug("Sent skb %p\n", skb);
+	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
 
-		bytes += skb->len;
-		packets++;
+	__netif_tx_lock(txq, raw_smp_processor_id());
+	free_old_xmit_skbs(sq);
+	__netif_tx_unlock(txq);
 
-		dev_kfree_skb_any(skb);
-	}
+	virtqueue_napi_complete(napi, sq->vq, 0);
 
-	/* Avoid overhead when no packets have been processed
-	 * happens when called speculatively from start_xmit.
-	 */
-	if (!packets)
-		return;
+	if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+		netif_tx_wake_queue(txq);
 
-	u64_stats_update_begin(&stats->tx_syncp);
-	stats->tx_bytes += bytes;
-	stats->tx_packets += packets;
-	u64_stats_update_end(&stats->tx_syncp);
+	return 0;
 }
 
 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
@@ -1125,10 +1195,14 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int err;
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
 	bool kick = !skb->xmit_more;
+	bool use_napi = sq->napi.weight;
 
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(sq);
 
+	if (use_napi && kick)
+		virtqueue_enable_cb_delayed(sq->vq);
+
 	/* timestamp packet in software */
 	skb_tx_timestamp(skb);
 
@@ -1147,8 +1221,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	/* Don't wait up for transmitted skbs to be freed. */
-	skb_orphan(skb);
-	nf_reset(skb);
+	if (!use_napi) {
+		skb_orphan(skb);
+		nf_reset(skb);
+	}
 
 	/* If running out of space, stop queue to avoid getting packets that we
 	 * are then unable to transmit.
@@ -1162,7 +1238,8 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
 		netif_stop_subqueue(dev, qnum);
-		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+		if (!use_napi &&
+		    unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
 			/* More just got used, free them then recheck. */
 			free_old_xmit_skbs(sq);
 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
@@ -1366,8 +1443,10 @@ static int virtnet_close(struct net_device *dev)
 	/* Make sure refill_work doesn't re-enable napi! */
 	cancel_delayed_work_sync(&vi->refill);
 
-	for (i = 0; i < vi->max_queue_pairs; i++)
+	for (i = 0; i < vi->max_queue_pairs; i++) {
 		napi_disable(&vi->rq[i].napi);
+		napi_disable(&vi->sq[i].napi);
+	}
 
 	return 0;
 }
@@ -1722,8 +1801,10 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
 	cancel_delayed_work_sync(&vi->refill);
 
 	if (netif_running(vi->dev)) {
-		for (i = 0; i < vi->max_queue_pairs; i++)
+		for (i = 0; i < vi->max_queue_pairs; i++) {
 			napi_disable(&vi->rq[i].napi);
+			napi_disable(&vi->sq[i].napi);
+		}
 	}
 }
 
@@ -1746,8 +1827,11 @@ static int virtnet_restore_up(struct virtio_device *vdev)
 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
 
-		for (i = 0; i < vi->max_queue_pairs; i++)
-			virtnet_napi_enable(&vi->rq[i]);
+		for (i = 0; i < vi->max_queue_pairs; i++) {
+			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+					       &vi->sq[i].napi);
+		}
 	}
 
 	netif_device_attach(vi->dev);
@@ -1952,6 +2036,7 @@ static void virtnet_free_queues(struct virtnet_info *vi)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		napi_hash_del(&vi->rq[i].napi);
 		netif_napi_del(&vi->rq[i].napi);
+		netif_napi_del(&vi->sq[i].napi);
 	}
 
 	/* We called napi_hash_del() before netif_napi_del(),
@@ -2137,6 +2222,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 		vi->rq[i].pages = NULL;
 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
 			       napi_weight);
+		netif_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
+			       napi_tx ? napi_weight : 0);
 
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);