@@ -517,10 +517,10 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
 	/* If we expect an interrupt for the next entry, tell host
 	 * by writing event index and flush out the write before
 	 * the read in the next get_buf call. */
-	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
-		vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx);
-		virtio_mb(vq->weak_barriers);
-	}
+	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
+		virtio_store_mb(vq->weak_barriers,
+				&vring_used_event(&vq->vring),
+				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

 #ifdef DEBUG
 	vq->last_add_time_valid = false;
@@ -653,8 +653,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
 	}
 	/* TODO: tune this threshold */
 	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
-	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs);
-	virtio_mb(vq->weak_barriers);
+
+	virtio_store_mb(vq->weak_barriers,
+			&vring_used_event(&vq->vring),
+			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));
+
 	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
 		END_USE(vq);
 		return false;
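
Both hunks apply the same transformation: the open-coded pattern of storing the
event index and then issuing a separate virtio_mb() is folded into a single
virtio_store_mb() call, which pairs the store with the barrier in one helper.
For reference, a minimal sketch of what such a helper can look like, assuming
it keeps the existing weak_barriers split between virt_store_mb() and a plain
store followed by mb(); the authoritative definition lives in
include/linux/virtio_ring.h:

	/*
	 * Illustrative sketch only, not the canonical definition:
	 * store v to *p and order the store before subsequent reads.
	 */
	static inline void virtio_store_mb(bool weak_barriers,
					   __virtio16 *p, __virtio16 v)
	{
		if (weak_barriers) {
			/* Paravirt device: a virt barrier suffices. */
			virt_store_mb(*p, v);
		} else {
			/* Real I/O device: plain store plus a full mb(). */
			WRITE_ONCE(*p, v);
			mb();
		}
	}

Fusing the store and the barrier documents the required pairing at the call
site and lets the weak-barriers path use the cheaper virt_store_mb() (built on
the SMP store-with-barrier primitive) instead of an unconditional full barrier.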