netvsc_drv.c

/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

struct net_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct delayed_work dwork;
	struct work_struct work;
};

#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
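
/* Usage sketch: assuming the driver is built as a module under its upstream
 * name, hv_netvsc, the ring size can be set at load time, e.g.:
 *
 *	modprobe hv_netvsc ring_size=256
 *
 * The parameter is read-only at runtime (S_IRUGO) and is clamped to
 * RING_SIZE_MIN in netvsc_drv_init() below.
 */

/* Deferred work: program the RNDIS packet filter to match the net_device's
 * current promiscuous/multicast state.
 */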
static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;

	nvdev = hv_get_drvdata(ndevctx->device_ctx);
	if (nvdev == NULL || nvdev->ndev == NULL)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (nvdev->ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(device_obj);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_start_all_queues(net);

	nvdev = hv_get_drvdata(device_obj);
	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *device_obj = net_device_ctx->device_ctx;
	int ret;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(device_obj);
	if (ret != 0)
		netdev_err(net, "unable to close device (ret %d).\n", ret);

	return ret;
}
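
/* Append a per-packet-info (PPI) record of @ppi_size bytes to the RNDIS
 * message under construction and return a pointer to it; the caller fills
 * in the payload at ppi->ppi_offset.  data_offset is bumped so the packet
 * data still lands after all PPI records.
 */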
static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}
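
/* On a little-endian machine this union is a sliding 40-bit window into the
 * Toeplitz key: ka holds the current 32 key bits and kb the next 8, so a
 * single left shift of subk.k advances the whole window one bit at a time.
 */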
union sub_key {
	u64 k;
	struct {
		u8 pad[3];
		u8 kb;
		u32 ka;
	};
};

/* Toeplitz hash function
 * data: network byte order
 * return: host byte order
 */
static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
{
	union sub_key subk;
	int k_next = 4;
	u8 dt;
	int i, j;
	u32 ret = 0;

	subk.k = 0;
	subk.ka = ntohl(*(u32 *)key);

	for (i = 0; i < dlen; i++) {
		subk.kb = key[k_next];
		k_next = (k_next + 1) % klen;
		dt = ((u8 *)data)[i];
		for (j = 0; j < 8; j++) {
			if (dt & 0x80)
				ret ^= subk.ka;
			dt <<= 1;
			subk.k <<= 1;
		}
	}

	return ret;
}

static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
{
	struct flow_keys flow;
	int data_len;

	if (!skb_flow_dissect(skb, &flow) ||
	    !(flow.n_proto == htons(ETH_P_IP) ||
	      flow.n_proto == htons(ETH_P_IPV6)))
		return false;

	if (flow.ip_proto == IPPROTO_TCP)
		data_len = 12;
	else
		data_len = 8;

	*hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);

	return true;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct hv_device *hdev = net_device_ctx->device_ctx;
	struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	if (netvsc_set_hash(&hash, skb)) {
		q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
			ndev->real_num_tx_queues;
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return q_idx;
}

static void netvsc_xmit_completion(void *context)
{
	struct hv_netvsc_packet *packet = (struct hv_netvsc_packet *)context;
	struct sk_buff *skb = (struct sk_buff *)
		(unsigned long)packet->send_completion_tid;
	u32 index = packet->send_buf_index;

	kfree(packet);

	if (skb && (index == NETVSC_INVALID_INDEX))
		dev_kfree_skb_any(skb);
}
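
/* Fill hv_page_buffer slots describing the physical pages backing the range
 * [offset, offset + len) within @page (which may be a compound page).
 * Returns the number of slots consumed.
 */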
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
			struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					offset_in_page(hdr),
					len, &pb[slots_used]);

	slots_used += fill_pg_buf(virt_to_page(data),
				offset_in_page(data),
				skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					frag->page_offset,
					skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused space at the start of the page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}
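
/* Classify the packet's L3/L4 protocols and store the transport header
 * offset in *trans_off.  Judging by the tests in netvsc_start_xmit(), the
 * TRANSPORT_INFO_* values encode the IP version in the upper 16 bits and
 * the transport protocol (TCP/UDP) in the lower bits.
 */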
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}
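
/* Transmit path: build an RNDIS packet message in front of the skb data,
 * append the needed per-packet-info records (hash, VLAN, checksum or LSO),
 * describe the whole thing as a page-buffer list, and hand it to
 * netvsc_send().
 */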
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	u32 hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length = skb->len;

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet.
	 */
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		netdev_err(net, "Packet too big: %u\n", skb->len);
		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Allocate a netvsc packet based on # of frags. */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_data_pgs * sizeof(struct hv_page_buffer)) +
			 sizeof(struct rndis_message) +
			 NDIS_VLAN_PPI_SIZE + NDIS_CSUM_PPI_SIZE +
			 NDIS_LSO_PPI_SIZE + NDIS_HASH_PPI_SIZE, GFP_ATOMIC);
	if (!packet) {
		/* out of memory, drop packet */
		netdev_err(net, "unable to allocate hv_netvsc_packet\n");
		dev_kfree_skb(skb);
		net->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	packet->vlan_tci = skb->vlan_tci;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->is_data_pkt = true;
	packet->total_data_buflen = skb->len;

	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
				sizeof(struct hv_netvsc_packet) +
				(num_data_pgs * sizeof(struct hv_page_buffer)));

	/* Set the completion routine */
	packet->send_completion = netvsc_xmit_completion;
	packet->send_completion_ctx = packet;
	packet->send_completion_tid = (unsigned long)skb;

	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg = packet->rndis_msg;
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
			ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
					      csum_partial(uh, udp_len, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
			ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, &packet->page_buf[0]);

	ret = netvsc_send(net_device_ctx->device_ctx, packet);

drop:
	if (ret == 0) {
		net->stats.tx_bytes += skb_length;
		net->stats.tx_packets++;
	} else {
		kfree(packet);
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;

	net_device = hv_get_drvdata(device_obj);
	rdev = net_device->extension;

	switch (indicate->status) {
	case RNDIS_STATUS_MEDIA_CONNECT:
		rdev->link_state = false;
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		rdev->link_state = true;
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		rdev->link_change = true;
		break;
	default:
		return;
	}

	net = net_device->ndev;

	if (!net || net->reg_state != NETREG_REGISTERED)
		return;

	ndev_ctx = netdev_priv(net);
	if (!rdev->link_state) {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
		schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
	} else {
		schedule_delayed_work(&ndev_ctx->dwork, 0);
	}
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 struct ndis_tcp_ip_checksum_info *csum_info)
{
	struct net_device *net;
	struct sk_buff *skb;

	net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
	if (!net || net->reg_state != NETREG_REGISTERED) {
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		packet->status = NVSP_STAT_FAIL;
		return 0;
	}

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);
	if (csum_info) {
		/* We only look at the IP checksum here.
		 * Should we be dropping the packet if checksum
		 * failed? How do we deal with other checksums - TCP/UDP?
		 */
		if (csum_info->receive.ip_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
	}

	if (packet->vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       packet->vlan_tci);

	skb_record_rx_queue(skb, packet->channel->
			    offermsg.offer.sub_channel_index);

	net->stats.rx_packets++;
	net->stats.rx_bytes += packet->total_data_buflen;

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
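
/* The channel counts reported here are what `ethtool -l <iface>` shows in
 * the guest, assuming the standard ethtool mapping of .get_channels.
 */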
static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(dev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}
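
/* An MTU change cannot be applied in place: the RNDIS device is removed,
 * the new MTU is set while it is down, and the device is added back so the
 * new size takes effect through re-initialization.
 */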
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device *nvdev = hv_get_drvdata(hdev);
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;

	if (nvdev == NULL || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	/* Hyper-V hosts don't support MTU < ETH_DATA_LEN (1500) */
	if (mtu < ETH_DATA_LEN || mtu > limit)
		return -EINVAL;

	nvdev->start_remove = true;
	cancel_work_sync(&ndevctx->work);
	netif_tx_disable(ndev);
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	ndevctx->device_ctx = hdev;
	hv_set_drvdata(hdev, ndev);
	device_info.ring_size = ring_size;
	rndis_filter_device_add(hdev, &device_info);
	netif_tx_wake_all_queues(ndev);

	return 0;
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct hv_device *hdev = ndevctx->device_ctx;
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(hdev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously, we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_channels	= netvsc_get_channels,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Send GARP packet to network peers after migrations.
 * After Quick Migration, the network is not immediately operational in the
 * current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
 * another netdev_notify_peers() into a delayed work, otherwise GARP packet
 * will not be sent after quick migration, and the network will be
 * disconnected. Also, we update the carrier status here.
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx;
	struct net_device *net;
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	bool notify, refresh = false;
	char *argv[] = { "/etc/init.d/network", "restart", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	rtnl_lock();

	ndev_ctx = container_of(w, struct net_device_context, dwork.work);
	net_device = hv_get_drvdata(ndev_ctx->device_ctx);
	rdev = net_device->extension;
	net = net_device->ndev;

	if (rdev->link_state) {
		netif_carrier_off(net);
		notify = false;
	} else {
		netif_carrier_on(net);
		notify = true;
		if (rdev->link_change) {
			rdev->link_change = false;
			refresh = true;
		}
	}

	rtnl_unlock();

	if (refresh)
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);

	if (notify)
		netdev_notify_peers(net);
}

static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				num_online_cpus());
	if (!net)
		return -ENOMEM;

	netif_carrier_off(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	hv_set_drvdata(dev, net);
	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
	INIT_WORK(&net_device_ctx->work, do_set_multicast);

	net->netdev_ops = &device_ops;

	net->hw_features = NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_IP_CSUM |
			   NETIF_F_TSO;
	net->features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_SG | NETIF_F_RXCSUM |
			NETIF_F_IP_CSUM | NETIF_F_TSO;

	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* Notify the netvsc driver of the new device */
	device_info.ring_size = ring_size;
	ret = rndis_filter_device_add(dev, &device_info);
	if (ret != 0) {
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		free_netdev(net);
		hv_set_drvdata(dev, NULL);
		return ret;
	}
	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	nvdev = hv_get_drvdata(dev);
	netif_set_real_num_tx_queues(net, nvdev->num_chn);
	netif_set_real_num_rx_queues(net, nvdev->num_chn);

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		rndis_filter_device_remove(dev);
		free_netdev(net);
	} else {
		schedule_delayed_work(&net_device_ctx->dwork, 0);
	}

	return ret;
}
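
/* Teardown mirrors probe: cancel the deferred work first so it cannot run
 * against a half-removed device, quiesce tx and unregister the net_device,
 * then remove the underlying RNDIS device before freeing.
 */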
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_device *net_device;

	net_device = hv_get_drvdata(dev);
	net = net_device->ndev;

	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	net_device->start_remove = true;

	ndev_ctx = netdev_priv(net);
	cancel_delayed_work_sync(&ndev_ctx->dwork);
	cancel_work_sync(&ndev_ctx->work);

	/* Stop outbound asap */
	netif_tx_disable(net);

	unregister_netdev(net);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	rndis_filter_device_remove(dev);

	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %d (min allowed)\n",
			ring_size);
	}
	return vmbus_driver_register(&netvsc_drv);
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);