@@ -94,7 +94,7 @@ struct vhost_net_ubuf_ref {
 	struct vhost_virtqueue *vq;
 };
 
-#define VHOST_RX_BATCH 64
+#define VHOST_NET_BATCH 64
 struct vhost_net_buf {
 	void **queue;
 	int tail;
@@ -168,7 +168,7 @@ static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
 
 	rxq->head = 0;
 	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
-					      VHOST_RX_BATCH);
+					      VHOST_NET_BATCH);
 	return rxq->tail;
 }
 
@@ -1007,7 +1007,7 @@ static void handle_rx(struct vhost_net *net)
 			goto out;
 		}
 		nvq->done_idx += headcount;
-		if (nvq->done_idx > VHOST_RX_BATCH)
+		if (nvq->done_idx > VHOST_NET_BATCH)
 			vhost_net_signal_used(nvq);
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
@@ -1075,7 +1075,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
 		return -ENOMEM;
 	}
 
-	queue = kmalloc_array(VHOST_RX_BATCH, sizeof(void *),
+	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
 			      GFP_KERNEL);
 	if (!queue) {
 		kfree(vqs);