@@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
 	return len;
 }
 
+static int
+virtio_transport_cancel_pkt(struct vsock_sock *vsk)
+{
+	struct virtio_vsock *vsock;
+	struct virtio_vsock_pkt *pkt, *n;
+	int cnt = 0;
+	LIST_HEAD(freeme);
+
+	vsock = virtio_vsock_get();
+	if (!vsock) {
+		return -ENODEV;
+	}
+
+	spin_lock_bh(&vsock->send_pkt_list_lock);
+	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+		if (pkt->vsk != vsk)
+			continue;
+		list_move(&pkt->list, &freeme);
+	}
+	spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+	list_for_each_entry_safe(pkt, n, &freeme, list) {
+		if (pkt->reply)
+			cnt++;
+		list_del(&pkt->list);
+		virtio_transport_free_pkt(pkt);
+	}
+
+	if (cnt) {
+		struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX];
+		int new_cnt;
+
+		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
+		if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) &&
+		    new_cnt < virtqueue_get_vring_size(rx_vq))
+			queue_work(virtio_vsock_workqueue, &vsock->rx_work);
+	}
+
+	return 0;
+}
+
 static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
 {
 	int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE;
@@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = {
 		.release                  = virtio_transport_release,
 		.connect                  = virtio_transport_connect,
 		.shutdown                 = virtio_transport_shutdown,
+		.cancel_pkt               = virtio_transport_cancel_pkt,
 
 		.dgram_bind               = virtio_transport_dgram_bind,
 		.dgram_dequeue            = virtio_transport_dgram_dequeue,
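
For context, the .cancel_pkt hook registered above is meant to be driven from the core
AF_VSOCK layer; that caller is not part of this diff. Below is a minimal sketch of how
such a caller might look, assuming a cancel_pkt member on struct vsock_transport and a
way to reach the registered ops from a struct vsock_sock (the helper name
vsock_core_get_transport() is used here only for illustration):

/* Sketch only -- not part of this patch.  The helper name
 * vsock_core_get_transport() and the cancel_pkt member on
 * struct vsock_transport are assumptions for illustration.
 */
static int vsock_cancel_queued_pkts(struct vsock_sock *vsk)
{
	const struct vsock_transport *ops = vsock_core_get_transport(vsk);

	if (!ops || !ops->cancel_pkt)
		return -EOPNOTSUPP;

	/* For the virtio transport this lands in virtio_transport_cancel_pkt(),
	 * dropping any packets still queued for @vsk, e.g. when a blocking
	 * connect() is interrupted by a signal or times out.
	 */
	return ops->cancel_pkt(vsk);
}

One design point worth noting from the first hunk: only packets with pkt->reply set are
counted, because queued_replies is what throttles the RX path; once enough pending
replies are cancelled that the counter drops back below the vring size, rx_work is
re-queued so the RX virtqueue can be refilled.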