@@ -1357,6 +1357,16 @@ static void free_old_xmit_skbs(struct send_queue *sq)
 	u64_stats_update_end(&sq->stats.syncp);
 }
 
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
+{
+	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
+		return false;
+	else if (q < vi->curr_queue_pairs)
+		return true;
+	else
+		return false;
+}
+
 static void virtnet_poll_cleantx(struct receive_queue *rq)
 {
 	struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1364,7 +1374,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
 	struct send_queue *sq = &vi->sq[index];
 	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
 
-	if (!sq->napi.weight)
+	if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
 		return;
 
 	if (__netif_tx_trylock(txq)) {
@@ -1441,8 +1451,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
 {
 	struct send_queue *sq = container_of(napi, struct send_queue, napi);
 	struct virtnet_info *vi = sq->vq->vdev->priv;
-	struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
+	unsigned int index = vq2txq(sq->vq);
+	struct netdev_queue *txq;
 
+	if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
+		/* We don't need to enable cb for XDP */
+		napi_complete_done(napi, 0);
+		return 0;
+	}
+
+	txq = netdev_get_tx_queue(vi->dev, index);
 	__netif_tx_lock(txq, raw_smp_processor_id());
 	free_old_xmit_skbs(sq);
 	__netif_tx_unlock(txq);
@@ -2352,9 +2370,12 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 	}
 
 	/* Make sure NAPI is not using any XDP TX queues for RX. */
-	if (netif_running(dev))
-		for (i = 0; i < vi->max_queue_pairs; i++)
+	if (netif_running(dev)) {
+		for (i = 0; i < vi->max_queue_pairs; i++) {
 			napi_disable(&vi->rq[i].napi);
+			virtnet_napi_tx_disable(&vi->sq[i].napi);
+		}
+	}
 
 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
 	err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
@@ -2373,16 +2394,22 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 		}
 		if (old_prog)
 			bpf_prog_put(old_prog);
-		if (netif_running(dev))
+		if (netif_running(dev)) {
 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+					       &vi->sq[i].napi);
+		}
 	}
 
 	return 0;
 
 err:
 	if (netif_running(dev)) {
-		for (i = 0; i < vi->max_queue_pairs; i++)
+		for (i = 0; i < vi->max_queue_pairs; i++) {
 			virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
+			virtnet_napi_tx_enable(vi, vi->sq[i].vq,
+					       &vi->sq[i].napi);
+		}
 	}
 	if (prog)
 		bpf_prog_sub(prog, vi->max_queue_pairs - 1);
@@ -2539,16 +2566,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
 			put_page(vi->rq[i].alloc_frag.page);
 }
 
-static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
-{
-	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
-		return false;
-	else if (q < vi->curr_queue_pairs)
-		return true;
-	else
-		return false;
-}
-
 static void free_unused_bufs(struct virtnet_info *vi)
 {
 	void *buf;
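
Note (editor's illustration, not part of the patch): is_xdp_raw_buffer_queue() encodes the driver's queue-layout convention that the xdp_queue_pairs XDP TX queues sit at the tail of the currently active range, so only indices in [curr_queue_pairs - xdp_queue_pairs, curr_queue_pairs) carry raw XDP buffers rather than skbs. The patch moves the helper above the NAPI handlers so both virtnet_poll_cleantx() and virtnet_poll_tx() can consult it, ensuring the skb-oriented free_old_xmit_skbs() is never run on a queue holding raw XDP buffers. A minimal standalone C sketch of the same range check, using a hypothetical queue_layout struct and made-up queue counts:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the two virtnet_info fields the check reads. */
struct queue_layout {
	int curr_queue_pairs;	/* queue pairs currently in use (skb + XDP) */
	int xdp_queue_pairs;	/* XDP TX queues, placed at the tail */
};

/* Same logic as the patch: indices below curr - xdp are normal skb
 * queues, indices in [curr - xdp, curr) carry raw XDP buffers, and
 * anything at or above curr_queue_pairs is inactive.
 */
static bool is_xdp_raw_buffer_queue(const struct queue_layout *l, int q)
{
	if (q < l->curr_queue_pairs - l->xdp_queue_pairs)
		return false;
	else if (q < l->curr_queue_pairs)
		return true;
	else
		return false;
}

int main(void)
{
	struct queue_layout l = { .curr_queue_pairs = 4, .xdp_queue_pairs = 2 };
	int q;

	for (q = 0; q < 6; q++)
		printf("queue %d: %s\n", q,
		       is_xdp_raw_buffer_queue(&l, q) ? "XDP" : "skb/inactive");
	/* queues 0-1: skb, 2-3: XDP, 4-5: inactive */
	return 0;
}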