|
@@ -556,6 +556,7 @@ void netvsc_device_remove(struct hv_device *device)
|
|
|
struct net_device *ndev = hv_get_drvdata(device);
|
|
|
struct net_device_context *net_device_ctx = netdev_priv(ndev);
|
|
|
struct netvsc_device *net_device = net_device_ctx->nvdev;
|
|
|
+ int i;
|
|
|
|
|
|
netvsc_disconnect_vsp(device);
|
|
|
|
|
@@ -570,6 +571,9 @@ void netvsc_device_remove(struct hv_device *device)
|
|
|
/* Now, we can close the channel safely */
|
|
|
vmbus_close(device->channel);
|
|
|
|
|
|
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++)
|
|
|
+ napi_disable(&net_device->chan_table[i].napi);
|
|
|
+
|
|
|
/* Release all resources */
|
|
|
free_netvsc_device(net_device);
|
|
|
}
|
|
@@ -600,9 +604,9 @@ static inline void netvsc_free_send_slot(struct netvsc_device *net_device,
|
|
|
static void netvsc_send_tx_complete(struct netvsc_device *net_device,
|
|
|
struct vmbus_channel *incoming_channel,
|
|
|
struct hv_device *device,
|
|
|
- struct vmpacket_descriptor *packet)
|
|
|
+ const struct vmpacket_descriptor *desc)
|
|
|
{
|
|
|
- struct sk_buff *skb = (struct sk_buff *)(unsigned long)packet->trans_id;
|
|
|
+ struct sk_buff *skb = (struct sk_buff *)(unsigned long)desc->trans_id;
|
|
|
struct net_device *ndev = hv_get_drvdata(device);
|
|
|
struct net_device_context *net_device_ctx = netdev_priv(ndev);
|
|
|
struct vmbus_channel *channel = device->channel;
|
|
@@ -647,14 +651,11 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
|
|
|
static void netvsc_send_completion(struct netvsc_device *net_device,
|
|
|
struct vmbus_channel *incoming_channel,
|
|
|
struct hv_device *device,
|
|
|
- struct vmpacket_descriptor *packet)
|
|
|
+ const struct vmpacket_descriptor *desc)
|
|
|
{
|
|
|
- struct nvsp_message *nvsp_packet;
|
|
|
+ struct nvsp_message *nvsp_packet = hv_pkt_data(desc);
|
|
|
struct net_device *ndev = hv_get_drvdata(device);
|
|
|
|
|
|
- nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
|
|
|
- (packet->offset8 << 3));
|
|
|
-
|
|
|
switch (nvsp_packet->hdr.msg_type) {
|
|
|
case NVSP_MSG_TYPE_INIT_COMPLETE:
|
|
|
case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE:
|
|
@@ -668,7 +669,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
|
|
|
|
|
|
case NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE:
|
|
|
netvsc_send_tx_complete(net_device, incoming_channel,
|
|
|
- device, packet);
|
|
|
+ device, desc);
|
|
|
break;
|
|
|
|
|
|
default:
|
|
@@ -1066,28 +1067,29 @@ static inline struct recv_comp_data *get_recv_comp_slot(
|
|
|
return rcd;
|
|
|
}
|
|
|
|
|
|
-static void netvsc_receive(struct net_device *ndev,
|
|
|
+static int netvsc_receive(struct net_device *ndev,
|
|
|
struct netvsc_device *net_device,
|
|
|
struct net_device_context *net_device_ctx,
|
|
|
struct hv_device *device,
|
|
|
struct vmbus_channel *channel,
|
|
|
- struct vmtransfer_page_packet_header *vmxferpage_packet,
|
|
|
+ const struct vmpacket_descriptor *desc,
|
|
|
struct nvsp_message *nvsp)
|
|
|
{
|
|
|
+ const struct vmtransfer_page_packet_header *vmxferpage_packet
|
|
|
+ = container_of(desc, const struct vmtransfer_page_packet_header, d);
|
|
|
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
|
|
|
char *recv_buf = net_device->recv_buf;
|
|
|
u32 status = NVSP_STAT_SUCCESS;
|
|
|
int i;
|
|
|
int count = 0;
|
|
|
int ret;
|
|
|
- struct recv_comp_data *rcd;
|
|
|
- u16 q_idx = channel->offermsg.offer.sub_channel_index;
|
|
|
|
|
|
/* Make sure this is a valid nvsp packet */
|
|
|
if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
|
|
|
netif_err(net_device_ctx, rx_err, ndev,
|
|
|
"Unknown nvsp packet type received %u\n",
|
|
|
nvsp->hdr.msg_type);
|
|
|
- return;
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
|
|
@@ -1095,7 +1097,7 @@ static void netvsc_receive(struct net_device *ndev,
|
|
|
"Invalid xfer page set id - expecting %x got %x\n",
|
|
|
NETVSC_RECEIVE_BUFFER_ID,
|
|
|
vmxferpage_packet->xfer_pageset_id);
|
|
|
- return;
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
count = vmxferpage_packet->range_cnt;
|
|
@@ -1111,26 +1113,26 @@ static void netvsc_receive(struct net_device *ndev,
|
|
|
channel, data, buflen);
|
|
|
}
|
|
|
|
|
|
- if (!net_device->chan_table[q_idx].mrc.buf) {
|
|
|
+ if (net_device->chan_table[q_idx].mrc.buf) {
|
|
|
+ struct recv_comp_data *rcd;
|
|
|
+
|
|
|
+ rcd = get_recv_comp_slot(net_device, channel, q_idx);
|
|
|
+ if (rcd) {
|
|
|
+ rcd->tid = vmxferpage_packet->d.trans_id;
|
|
|
+ rcd->status = status;
|
|
|
+ } else {
|
|
|
+ netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
|
|
|
+ q_idx, vmxferpage_packet->d.trans_id);
|
|
|
+ }
|
|
|
+ } else {
|
|
|
ret = netvsc_send_recv_completion(channel,
|
|
|
vmxferpage_packet->d.trans_id,
|
|
|
status);
|
|
|
if (ret)
|
|
|
netdev_err(ndev, "Recv_comp q:%hd, tid:%llx, err:%d\n",
|
|
|
q_idx, vmxferpage_packet->d.trans_id, ret);
|
|
|
- return;
|
|
|
}
|
|
|
-
|
|
|
- rcd = get_recv_comp_slot(net_device, channel, q_idx);
|
|
|
-
|
|
|
- if (!rcd) {
|
|
|
- netdev_err(ndev, "Recv_comp full buf q:%hd, tid:%llx\n",
|
|
|
- q_idx, vmxferpage_packet->d.trans_id);
|
|
|
- return;
|
|
|
- }
|
|
|
-
|
|
|
- rcd->tid = vmxferpage_packet->d.trans_id;
|
|
|
- rcd->status = status;
|
|
|
+ return count;
|
|
|
}
|
|
|
|
|
|
static void netvsc_send_table(struct hv_device *hdev,
|
|
@@ -1180,17 +1182,15 @@ static inline void netvsc_receive_inband(struct hv_device *hdev,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
-static void netvsc_process_raw_pkt(struct hv_device *device,
|
|
|
- struct vmbus_channel *channel,
|
|
|
- struct netvsc_device *net_device,
|
|
|
- struct net_device *ndev,
|
|
|
- u64 request_id,
|
|
|
- struct vmpacket_descriptor *desc)
|
|
|
+static int netvsc_process_raw_pkt(struct hv_device *device,
|
|
|
+ struct vmbus_channel *channel,
|
|
|
+ struct netvsc_device *net_device,
|
|
|
+ struct net_device *ndev,
|
|
|
+ u64 request_id,
|
|
|
+ const struct vmpacket_descriptor *desc)
|
|
|
{
|
|
|
struct net_device_context *net_device_ctx = netdev_priv(ndev);
|
|
|
- struct nvsp_message *nvmsg
|
|
|
- = (struct nvsp_message *)((unsigned long)desc
|
|
|
- + (desc->offset8 << 3));
|
|
|
+ struct nvsp_message *nvmsg = hv_pkt_data(desc);
|
|
|
|
|
|
switch (desc->type) {
|
|
|
case VM_PKT_COMP:
|
|
@@ -1198,10 +1198,8 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
|
|
|
break;
|
|
|
|
|
|
case VM_PKT_DATA_USING_XFER_PAGES:
|
|
|
- netvsc_receive(ndev, net_device, net_device_ctx,
|
|
|
- device, channel,
|
|
|
- (struct vmtransfer_page_packet_header *)desc,
|
|
|
- nvmsg);
|
|
|
+ return netvsc_receive(ndev, net_device, net_device_ctx,
|
|
|
+ device, channel, desc, nvmsg);
|
|
|
break;
|
|
|
|
|
|
case VM_PKT_DATA_INBAND:
|
|
@@ -1213,22 +1211,63 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
|
|
|
desc->type, request_id);
|
|
|
break;
|
|
|
}
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static struct hv_device *netvsc_channel_to_device(struct vmbus_channel *channel)
|
|
|
+{
|
|
|
+ struct vmbus_channel *primary = channel->primary_channel;
|
|
|
+
|
|
|
+ return primary ? primary->device_obj : channel->device_obj;
|
|
|
+}
|
|
|
+
|
|
|
+int netvsc_poll(struct napi_struct *napi, int budget)
|
|
|
+{
|
|
|
+ struct netvsc_channel *nvchan
|
|
|
+ = container_of(napi, struct netvsc_channel, napi);
|
|
|
+ struct vmbus_channel *channel = nvchan->channel;
|
|
|
+ struct hv_device *device = netvsc_channel_to_device(channel);
|
|
|
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
|
|
|
+ struct net_device *ndev = hv_get_drvdata(device);
|
|
|
+ struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
|
|
|
+ const struct vmpacket_descriptor *desc;
|
|
|
+ int work_done = 0;
|
|
|
+
|
|
|
+ desc = hv_pkt_iter_first(channel);
|
|
|
+ while (desc) {
|
|
|
+ int count;
|
|
|
+
|
|
|
+ count = netvsc_process_raw_pkt(device, channel, net_device,
|
|
|
+ ndev, desc->trans_id, desc);
|
|
|
+ work_done += count;
|
|
|
+ desc = __hv_pkt_iter_next(channel, desc);
|
|
|
+
|
|
|
+ /* If receive packet budget is exhausted, reschedule */
|
|
|
+ if (work_done >= budget) {
|
|
|
+ work_done = budget;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ hv_pkt_iter_close(channel);
|
|
|
+
|
|
|
+ /* If ring is empty and NAPI is not doing polling */
|
|
|
+ if (work_done < budget &&
|
|
|
+ napi_complete_done(napi, work_done) &&
|
|
|
+ hv_end_read(&channel->inbound) != 0)
|
|
|
+ napi_reschedule(napi);
|
|
|
+
|
|
|
+ netvsc_chk_recv_comp(net_device, channel, q_idx);
|
|
|
+ return work_done;
|
|
|
}
|
|
|
|
|
|
void netvsc_channel_cb(void *context)
|
|
|
{
|
|
|
struct vmbus_channel *channel = context;
|
|
|
+ struct hv_device *device = netvsc_channel_to_device(channel);
|
|
|
u16 q_idx = channel->offermsg.offer.sub_channel_index;
|
|
|
- struct hv_device *device;
|
|
|
struct netvsc_device *net_device;
|
|
|
- struct vmpacket_descriptor *desc;
|
|
|
struct net_device *ndev;
|
|
|
- bool need_to_commit = false;
|
|
|
-
|
|
|
- if (channel->primary_channel != NULL)
|
|
|
- device = channel->primary_channel->device_obj;
|
|
|
- else
|
|
|
- device = channel->device_obj;
|
|
|
|
|
|
ndev = hv_get_drvdata(device);
|
|
|
if (unlikely(!ndev))
|
|
@@ -1239,21 +1278,9 @@ void netvsc_channel_cb(void *context)
|
|
|
netvsc_channel_idle(net_device, q_idx))
|
|
|
return;
|
|
|
|
|
|
- /* commit_rd_index() -> hv_signal_on_read() needs this. */
|
|
|
- init_cached_read_index(channel);
|
|
|
-
|
|
|
- while ((desc = get_next_pkt_raw(channel)) != NULL) {
|
|
|
- netvsc_process_raw_pkt(device, channel, net_device,
|
|
|
- ndev, desc->trans_id, desc);
|
|
|
-
|
|
|
- put_pkt_raw(channel, desc);
|
|
|
- need_to_commit = true;
|
|
|
- }
|
|
|
-
|
|
|
- if (need_to_commit)
|
|
|
- commit_rd_index(channel);
|
|
|
-
|
|
|
- netvsc_chk_recv_comp(net_device, channel, q_idx);
|
|
|
+ /* disable interrupts from host */
|
|
|
+ hv_begin_read(&channel->inbound);
|
|
|
+ napi_schedule(&net_device->chan_table[q_idx].napi);
|
|
|
}
|
|
|
|
|
|
/*
|
|
@@ -1275,6 +1302,11 @@ int netvsc_device_add(struct hv_device *device,
|
|
|
|
|
|
net_device->ring_size = ring_size;
|
|
|
|
|
|
+ /* Because the device uses NAPI, all the interrupt batching and
|
|
|
+ * control is done via Net softirq, not the channel handling
|
|
|
+ */
|
|
|
+ set_channel_read_mode(device->channel, HV_CALL_ISR);
|
|
|
+
|
|
|
/* Open the channel */
|
|
|
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
|
|
|
ring_size * PAGE_SIZE, NULL, 0,
|
|
@@ -1292,8 +1324,16 @@ int netvsc_device_add(struct hv_device *device,
|
|
|
* chn_table with the default channel to use it before subchannels are
|
|
|
* opened.
|
|
|
*/
|
|
|
- for (i = 0; i < VRSS_CHANNEL_MAX; i++)
|
|
|
- net_device->chan_table[i].channel = device->channel;
|
|
|
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
|
|
|
+ struct netvsc_channel *nvchan = &net_device->chan_table[i];
|
|
|
+
|
|
|
+ nvchan->channel = device->channel;
|
|
|
+ netif_napi_add(ndev, &nvchan->napi,
|
|
|
+ netvsc_poll, NAPI_POLL_WEIGHT);
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Enable NAPI handler for init callbacks */
|
|
|
+ napi_enable(&net_device->chan_table[0].napi);
|
|
|
|
|
|
/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
|
|
|
* populated.
|
|
@@ -1313,6 +1353,8 @@ int netvsc_device_add(struct hv_device *device,
|
|
|
return ret;
|
|
|
|
|
|
close:
|
|
|
+ napi_disable(&net_device->chan_table[0].napi);
|
|
|
+
|
|
|
/* Now, we can close the channel safely */
|
|
|
vmbus_close(device->channel);
|
|
|
|