@@ -480,6 +480,74 @@ signal_used:
 	nvq->batched_xdp = 0;
 }

+static int sock_has_rx_data(struct socket *sock)
+{
+	if (unlikely(!sock))
+		return 0;
+
+	if (sock->ops->peek_len)
+		return sock->ops->peek_len(sock);
+
+	return skb_queue_empty(&sock->sk->sk_receive_queue);
+}
+
+static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
+					  struct vhost_virtqueue *vq)
+{
+	if (!vhost_vq_avail_empty(&net->dev, vq)) {
+		vhost_poll_queue(&vq->poll);
+	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
+		vhost_disable_notify(&net->dev, vq);
+		vhost_poll_queue(&vq->poll);
+	}
+}
+
+static void vhost_net_busy_poll(struct vhost_net *net,
+				struct vhost_virtqueue *rvq,
+				struct vhost_virtqueue *tvq,
+				bool *busyloop_intr,
+				bool poll_rx)
+{
+	unsigned long busyloop_timeout;
+	unsigned long endtime;
+	struct socket *sock;
+	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;
+
+	mutex_lock_nested(&vq->mutex, poll_rx ? VHOST_NET_VQ_TX : VHOST_NET_VQ_RX);
+	vhost_disable_notify(&net->dev, vq);
+	sock = rvq->private_data;
+
+	busyloop_timeout = poll_rx ? rvq->busyloop_timeout :
+				     tvq->busyloop_timeout;
+
+	preempt_disable();
+	endtime = busy_clock() + busyloop_timeout;
+
+	while (vhost_can_busy_poll(endtime)) {
+		if (vhost_has_work(&net->dev)) {
+			*busyloop_intr = true;
+			break;
+		}
+
+		if ((sock_has_rx_data(sock) &&
+		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
+		    !vhost_vq_avail_empty(&net->dev, tvq))
+			break;
+
+		cpu_relax();
+	}
+
+	preempt_enable();
+
+	if (poll_rx || sock_has_rx_data(sock))
+		vhost_net_busy_poll_try_queue(net, vq);
+	else if (!poll_rx) /* On tx here, sock has no rx data. */
+		vhost_enable_notify(&net->dev, rvq);
+
+	mutex_unlock(&vq->mutex);
+}
+
+
 static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
 				    struct vhost_net_virtqueue *nvq,
 				    unsigned int *out_num, unsigned int *in_num,
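The helper above takes poll_rx so the same loop can serve both directions. The hunks below convert only the rx path; on the tx side, a caller would presumably look roughly like the following sketch. It is illustrative only, not part of this patch: it reuses the names from the context above (tnvq here plays the role of nvq) and omits the zerocopy/batch-flush details.

/* Sketch, not part of this patch: tx-side caller of vhost_net_busy_poll(). */
static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	int r;

	r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
			      out_num, in_num, NULL, NULL);

	if (r == tvq->num && tvq->busyloop_timeout) {
		/* Tx avail ring is empty: busy poll the paired rx
		 * socket/vq as well before falling back to notification;
		 * poll_rx == false selects the tx flavor of the helper.
		 */
		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}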
@@ -897,16 +965,6 @@ static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
 	return len;
 }

-static int sk_has_rx_data(struct sock *sk)
-{
-	struct socket *sock = sk->sk_socket;
-
-	if (sock->ops->peek_len)
-		return sock->ops->peek_len(sock);
-
-	return skb_queue_empty(&sk->sk_receive_queue);
-}
-
 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
 				      bool *busyloop_intr)
 {
@@ -914,41 +972,13 @@ static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
 	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
 	struct vhost_virtqueue *rvq = &rnvq->vq;
 	struct vhost_virtqueue *tvq = &tnvq->vq;
-	unsigned long uninitialized_var(endtime);
 	int len = peek_head_len(rnvq, sk);

-	if (!len && tvq->busyloop_timeout) {
+	if (!len && rvq->busyloop_timeout) {
 		/* Flush batched heads first */
 		vhost_net_signal_used(rnvq);
 		/* Both tx vq and rx socket were polled here */
-		mutex_lock_nested(&tvq->mutex, VHOST_NET_VQ_TX);
-		vhost_disable_notify(&net->dev, tvq);
-
-		preempt_disable();
-		endtime = busy_clock() + tvq->busyloop_timeout;
-
-		while (vhost_can_busy_poll(endtime)) {
-			if (vhost_has_work(&net->dev)) {
-				*busyloop_intr = true;
-				break;
-			}
-			if ((sk_has_rx_data(sk) &&
-			     !vhost_vq_avail_empty(&net->dev, rvq)) ||
-			    !vhost_vq_avail_empty(&net->dev, tvq))
-				break;
-			cpu_relax();
-		}
-
-		preempt_enable();
-
-		if (!vhost_vq_avail_empty(&net->dev, tvq)) {
-			vhost_poll_queue(&tvq->poll);
-		} else if (unlikely(vhost_enable_notify(&net->dev, tvq))) {
-			vhost_disable_notify(&net->dev, tvq);
-			vhost_poll_queue(&tvq->poll);
-		}
-
-		mutex_unlock(&tvq->mutex);
+		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);

 		len = peek_head_len(rnvq, sk);
 	}
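A note on the lock annotation in vhost_net_busy_poll(): both vq mutexes are initialized at the same site and therefore share one lockdep class, so the subclass passed to mutex_lock_nested() only tells lockdep which nesting level the paired vq mutex is taken at. A minimal sketch of the nesting the helper assumes (the caller lines are surrounding context, not part of the patch):

	/* Sketch: poll_rx == true means the rx handler already holds
	 * rvq->mutex, and the helper nests the tx vq mutex under it
	 * at subclass VHOST_NET_VQ_TX.
	 */
	mutex_lock(&rvq->mutex);				/* caller: rx path */
	mutex_lock_nested(&tvq->mutex, VHOST_NET_VQ_TX);	/* helper */

	/* poll_rx == false is the tx-side counterpart: the tx path holds
	 * tvq->mutex and the helper nests the rx vq mutex at subclass
	 * VHOST_NET_VQ_RX.
	 */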