@@ -743,6 +743,7 @@ static inline int netvsc_send_pkt(
 	u64 req_id;
 	int ret;
 	struct hv_page_buffer *pgbuf;
+	u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
 
 	nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
 	if (packet->is_data_pkt) {
@@ -769,32 +770,42 @@ static inline int netvsc_send_pkt(
 	if (out_channel->rescind)
 		return -ENODEV;
 
+	/*
+	 * It is possible that once we successfully place this packet
+	 * on the ringbuffer, we may stop the queue. In that case, we want
+	 * to notify the host independent of the xmit_more flag. We don't
+	 * need to be precise here; in the worst case we may signal the host
+	 * unnecessarily.
+	 */
+	if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
+		packet->xmit_more = false;
+
 	if (packet->page_buf_cnt) {
 		pgbuf = packet->cp_partial ? packet->page_buf +
 			packet->rmsg_pgcnt : packet->page_buf;
-		ret = vmbus_sendpacket_pagebuffer(out_channel,
-						  pgbuf,
-						  packet->page_buf_cnt,
-						  &nvmsg,
-						  sizeof(struct nvsp_message),
-						  req_id);
+		ret = vmbus_sendpacket_pagebuffer_ctl(out_channel,
+						      pgbuf,
+						      packet->page_buf_cnt,
+						      &nvmsg,
+						      sizeof(struct nvsp_message),
+						      req_id,
+						      VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+						      !packet->xmit_more);
 	} else {
-		ret = vmbus_sendpacket(
-				out_channel, &nvmsg,
-				sizeof(struct nvsp_message),
-				req_id,
-				VM_PKT_DATA_INBAND,
-				VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+		ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
+					   sizeof(struct nvsp_message),
+					   req_id,
+					   VM_PKT_DATA_INBAND,
+					   VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
+					   !packet->xmit_more);
 	}
 
 	if (ret == 0) {
 		atomic_inc(&net_device->num_outstanding_sends);
 		atomic_inc(&net_device->queue_sends[q_idx]);
 
-		if (hv_ringbuf_avail_percent(&out_channel->outbound) <
-			RING_AVAIL_PERCENT_LOWATER) {
-			netif_tx_stop_queue(netdev_get_tx_queue(
-					    ndev, q_idx));
-
+		if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
+			netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
+
 			if (atomic_read(&net_device->
 				queue_sends[q_idx]) < 1)