@@ -741,7 +741,7 @@ static int sk_has_rx_data(struct sock *sk)
 	return skb_queue_empty(&sk->sk_receive_queue);
 }
 
-static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
+static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
 {
 	struct vhost_virtqueue *vq = &nvq->vq;
 	struct vhost_dev *dev = vq->dev;
@@ -765,7 +765,7 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
 
 	if (!len && tvq->busyloop_timeout) {
 		/* Flush batched heads first */
-		vhost_rx_signal_used(rnvq);
+		vhost_net_signal_used(rnvq);
 		/* Both tx vq and rx socket were polled here */
 		mutex_lock_nested(&tvq->mutex, 1);
 		vhost_disable_notify(&net->dev, tvq);
@@ -1008,7 +1008,7 @@ static void handle_rx(struct vhost_net *net)
 		}
 		nvq->done_idx += headcount;
 		if (nvq->done_idx > VHOST_RX_BATCH)
-			vhost_rx_signal_used(nvq);
+			vhost_net_signal_used(nvq);
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
 		total_len += vhost_len;
@@ -1022,7 +1022,7 @@ static void handle_rx(struct vhost_net *net)
 	else
 		vhost_net_enable_vq(net, vq);
 out:
-	vhost_rx_signal_used(nvq);
+	vhost_net_signal_used(nvq);
 	mutex_unlock(&vq->mutex);
 }
 