netvsc_drv.c 53 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145
  1. /*
  2. * Copyright (c) 2009, Microsoft Corporation.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program; if not, see <http://www.gnu.org/licenses/>.
  15. *
  16. * Authors:
  17. * Haiyang Zhang <haiyangz@microsoft.com>
  18. * Hank Janssen <hjanssen@microsoft.com>
  19. */
  20. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21. #include <linux/init.h>
  22. #include <linux/atomic.h>
  23. #include <linux/module.h>
  24. #include <linux/highmem.h>
  25. #include <linux/device.h>
  26. #include <linux/io.h>
  27. #include <linux/delay.h>
  28. #include <linux/netdevice.h>
  29. #include <linux/inetdevice.h>
  30. #include <linux/etherdevice.h>
  31. #include <linux/skbuff.h>
  32. #include <linux/if_vlan.h>
  33. #include <linux/in.h>
  34. #include <linux/slab.h>
  35. #include <linux/rtnetlink.h>
  36. #include <linux/netpoll.h>
  37. #include <net/arp.h>
  38. #include <net/route.h>
  39. #include <net/sock.h>
  40. #include <net/pkt_sched.h>
  41. #include <net/checksum.h>
  42. #include <net/ip6_checksum.h>
  43. #include <net/failover.h>
  44. #include "hyperv_net.h"
  45. #define RING_SIZE_MIN 64
  46. #define RETRY_US_LO 5000
  47. #define RETRY_US_HI 10000
  48. #define RETRY_MAX 2000 /* >10 sec */
  49. #define LINKCHANGE_INT (2 * HZ)
  50. #define VF_TAKEOVER_INT (HZ / 10)
  51. static unsigned int ring_size __ro_after_init = 128;
  52. module_param(ring_size, uint, 0444);
  53. MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
  54. unsigned int netvsc_ring_bytes __ro_after_init;
  55. static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
  56. NETIF_MSG_LINK | NETIF_MSG_IFUP |
  57. NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
  58. NETIF_MSG_TX_ERR;
  59. static int debug = -1;
  60. module_param(debug, int, 0444);
  61. MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  62. static void netvsc_change_rx_flags(struct net_device *net, int change)
  63. {
  64. struct net_device_context *ndev_ctx = netdev_priv(net);
  65. struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
  66. int inc;
  67. if (!vf_netdev)
  68. return;
  69. if (change & IFF_PROMISC) {
  70. inc = (net->flags & IFF_PROMISC) ? 1 : -1;
  71. dev_set_promiscuity(vf_netdev, inc);
  72. }
  73. if (change & IFF_ALLMULTI) {
  74. inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
  75. dev_set_allmulti(vf_netdev, inc);
  76. }
  77. }
  78. static void netvsc_set_rx_mode(struct net_device *net)
  79. {
  80. struct net_device_context *ndev_ctx = netdev_priv(net);
  81. struct net_device *vf_netdev;
  82. struct netvsc_device *nvdev;
  83. rcu_read_lock();
  84. vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
  85. if (vf_netdev) {
  86. dev_uc_sync(vf_netdev, net);
  87. dev_mc_sync(vf_netdev, net);
  88. }
  89. nvdev = rcu_dereference(ndev_ctx->nvdev);
  90. if (nvdev)
  91. rndis_filter_update(nvdev);
  92. rcu_read_unlock();
  93. }
  94. static int netvsc_open(struct net_device *net)
  95. {
  96. struct net_device_context *ndev_ctx = netdev_priv(net);
  97. struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
  98. struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
  99. struct rndis_device *rdev;
  100. int ret = 0;
  101. netif_carrier_off(net);
  102. /* Open up the device */
  103. ret = rndis_filter_open(nvdev);
  104. if (ret != 0) {
  105. netdev_err(net, "unable to open device (ret %d).\n", ret);
  106. return ret;
  107. }
  108. rdev = nvdev->extension;
  109. if (!rdev->link_state) {
  110. netif_carrier_on(net);
  111. netif_tx_wake_all_queues(net);
  112. }
  113. if (vf_netdev) {
  114. /* Setting synthetic device up transparently sets
  115. * slave as up. If open fails, then slave will be
  116. * still be offline (and not used).
  117. */
  118. ret = dev_open(vf_netdev);
  119. if (ret)
  120. netdev_warn(net,
  121. "unable to open slave: %s: %d\n",
  122. vf_netdev->name, ret);
  123. }
  124. return 0;
  125. }
  126. static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
  127. {
  128. unsigned int retry = 0;
  129. int i;
  130. /* Ensure pending bytes in ring are read */
  131. for (;;) {
  132. u32 aread = 0;
  133. for (i = 0; i < nvdev->num_chn; i++) {
  134. struct vmbus_channel *chn
  135. = nvdev->chan_table[i].channel;
  136. if (!chn)
  137. continue;
  138. /* make sure receive not running now */
  139. napi_synchronize(&nvdev->chan_table[i].napi);
  140. aread = hv_get_bytes_to_read(&chn->inbound);
  141. if (aread)
  142. break;
  143. aread = hv_get_bytes_to_read(&chn->outbound);
  144. if (aread)
  145. break;
  146. }
  147. if (aread == 0)
  148. return 0;
  149. if (++retry > RETRY_MAX)
  150. return -ETIMEDOUT;
  151. usleep_range(RETRY_US_LO, RETRY_US_HI);
  152. }
  153. }
  154. static int netvsc_close(struct net_device *net)
  155. {
  156. struct net_device_context *net_device_ctx = netdev_priv(net);
  157. struct net_device *vf_netdev
  158. = rtnl_dereference(net_device_ctx->vf_netdev);
  159. struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
  160. int ret;
  161. netif_tx_disable(net);
  162. /* No need to close rndis filter if it is removed already */
  163. if (!nvdev)
  164. return 0;
  165. ret = rndis_filter_close(nvdev);
  166. if (ret != 0) {
  167. netdev_err(net, "unable to close device (ret %d).\n", ret);
  168. return ret;
  169. }
  170. ret = netvsc_wait_until_empty(nvdev);
  171. if (ret)
  172. netdev_err(net, "Ring buffer not empty after closing rndis\n");
  173. if (vf_netdev)
  174. dev_close(vf_netdev);
  175. return ret;
  176. }
  177. static inline void *init_ppi_data(struct rndis_message *msg,
  178. u32 ppi_size, u32 pkt_type)
  179. {
  180. struct rndis_packet *rndis_pkt = &msg->msg.pkt;
  181. struct rndis_per_packet_info *ppi;
  182. rndis_pkt->data_offset += ppi_size;
  183. ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
  184. + rndis_pkt->per_pkt_info_len;
  185. ppi->size = ppi_size;
  186. ppi->type = pkt_type;
  187. ppi->ppi_offset = sizeof(struct rndis_per_packet_info);
  188. rndis_pkt->per_pkt_info_len += ppi_size;
  189. return ppi + 1;
  190. }
  191. /* Azure hosts don't support non-TCP port numbers in hashing for fragmented
  192. * packets. We can use ethtool to change UDP hash level when necessary.
  193. */
  194. static inline u32 netvsc_get_hash(
  195. struct sk_buff *skb,
  196. const struct net_device_context *ndc)
  197. {
  198. struct flow_keys flow;
  199. u32 hash, pkt_proto = 0;
  200. static u32 hashrnd __read_mostly;
  201. net_get_random_once(&hashrnd, sizeof(hashrnd));
  202. if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
  203. return 0;
  204. switch (flow.basic.ip_proto) {
  205. case IPPROTO_TCP:
  206. if (flow.basic.n_proto == htons(ETH_P_IP))
  207. pkt_proto = HV_TCP4_L4HASH;
  208. else if (flow.basic.n_proto == htons(ETH_P_IPV6))
  209. pkt_proto = HV_TCP6_L4HASH;
  210. break;
  211. case IPPROTO_UDP:
  212. if (flow.basic.n_proto == htons(ETH_P_IP))
  213. pkt_proto = HV_UDP4_L4HASH;
  214. else if (flow.basic.n_proto == htons(ETH_P_IPV6))
  215. pkt_proto = HV_UDP6_L4HASH;
  216. break;
  217. }
  218. if (pkt_proto & ndc->l4_hash) {
  219. return skb_get_hash(skb);
  220. } else {
  221. if (flow.basic.n_proto == htons(ETH_P_IP))
  222. hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
  223. else if (flow.basic.n_proto == htons(ETH_P_IPV6))
  224. hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
  225. else
  226. hash = 0;
  227. skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
  228. }
  229. return hash;
  230. }
  231. static inline int netvsc_get_tx_queue(struct net_device *ndev,
  232. struct sk_buff *skb, int old_idx)
  233. {
  234. const struct net_device_context *ndc = netdev_priv(ndev);
  235. struct sock *sk = skb->sk;
  236. int q_idx;
  237. q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
  238. (VRSS_SEND_TAB_SIZE - 1)];
  239. /* If queue index changed record the new value */
  240. if (q_idx != old_idx &&
  241. sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
  242. sk_tx_queue_set(sk, q_idx);
  243. return q_idx;
  244. }
  245. /*
  246. * Select queue for transmit.
  247. *
  248. * If a valid queue has already been assigned, then use that.
  249. * Otherwise compute tx queue based on hash and the send table.
  250. *
  251. * This is basically similar to default (__netdev_pick_tx) with the added step
  252. * of using the host send_table when no other queue has been assigned.
  253. *
  254. * TODO support XPS - but get_xps_queue not exported
  255. */
  256. static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
  257. {
  258. int q_idx = sk_tx_queue_get(skb->sk);
  259. if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
  260. /* If forwarding a packet, we use the recorded queue when
  261. * available for better cache locality.
  262. */
  263. if (skb_rx_queue_recorded(skb))
  264. q_idx = skb_get_rx_queue(skb);
  265. else
  266. q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
  267. }
  268. return q_idx;
  269. }
  270. static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
  271. void *accel_priv,
  272. select_queue_fallback_t fallback)
  273. {
  274. struct net_device_context *ndc = netdev_priv(ndev);
  275. struct net_device *vf_netdev;
  276. u16 txq;
  277. rcu_read_lock();
  278. vf_netdev = rcu_dereference(ndc->vf_netdev);
  279. if (vf_netdev) {
  280. const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
  281. if (vf_ops->ndo_select_queue)
  282. txq = vf_ops->ndo_select_queue(vf_netdev, skb,
  283. accel_priv, fallback);
  284. else
  285. txq = fallback(vf_netdev, skb);
  286. /* Record the queue selected by VF so that it can be
  287. * used for common case where VF has more queues than
  288. * the synthetic device.
  289. */
  290. qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
  291. } else {
  292. txq = netvsc_pick_tx(ndev, skb);
  293. }
  294. rcu_read_unlock();
  295. while (unlikely(txq >= ndev->real_num_tx_queues))
  296. txq -= ndev->real_num_tx_queues;
  297. return txq;
  298. }
  299. static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
  300. struct hv_page_buffer *pb)
  301. {
  302. int j = 0;
  303. /* Deal with compund pages by ignoring unused part
  304. * of the page.
  305. */
  306. page += (offset >> PAGE_SHIFT);
  307. offset &= ~PAGE_MASK;
  308. while (len > 0) {
  309. unsigned long bytes;
  310. bytes = PAGE_SIZE - offset;
  311. if (bytes > len)
  312. bytes = len;
  313. pb[j].pfn = page_to_pfn(page);
  314. pb[j].offset = offset;
  315. pb[j].len = bytes;
  316. offset += bytes;
  317. len -= bytes;
  318. if (offset == PAGE_SIZE && len) {
  319. page++;
  320. offset = 0;
  321. j++;
  322. }
  323. }
  324. return j + 1;
  325. }
  326. static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
  327. struct hv_netvsc_packet *packet,
  328. struct hv_page_buffer *pb)
  329. {
  330. u32 slots_used = 0;
  331. char *data = skb->data;
  332. int frags = skb_shinfo(skb)->nr_frags;
  333. int i;
  334. /* The packet is laid out thus:
  335. * 1. hdr: RNDIS header and PPI
  336. * 2. skb linear data
  337. * 3. skb fragment data
  338. */
  339. slots_used += fill_pg_buf(virt_to_page(hdr),
  340. offset_in_page(hdr),
  341. len, &pb[slots_used]);
  342. packet->rmsg_size = len;
  343. packet->rmsg_pgcnt = slots_used;
  344. slots_used += fill_pg_buf(virt_to_page(data),
  345. offset_in_page(data),
  346. skb_headlen(skb), &pb[slots_used]);
  347. for (i = 0; i < frags; i++) {
  348. skb_frag_t *frag = skb_shinfo(skb)->frags + i;
  349. slots_used += fill_pg_buf(skb_frag_page(frag),
  350. frag->page_offset,
  351. skb_frag_size(frag), &pb[slots_used]);
  352. }
  353. return slots_used;
  354. }
  355. static int count_skb_frag_slots(struct sk_buff *skb)
  356. {
  357. int i, frags = skb_shinfo(skb)->nr_frags;
  358. int pages = 0;
  359. for (i = 0; i < frags; i++) {
  360. skb_frag_t *frag = skb_shinfo(skb)->frags + i;
  361. unsigned long size = skb_frag_size(frag);
  362. unsigned long offset = frag->page_offset;
  363. /* Skip unused frames from start of page */
  364. offset &= ~PAGE_MASK;
  365. pages += PFN_UP(offset + size);
  366. }
  367. return pages;
  368. }
  369. static int netvsc_get_slots(struct sk_buff *skb)
  370. {
  371. char *data = skb->data;
  372. unsigned int offset = offset_in_page(data);
  373. unsigned int len = skb_headlen(skb);
  374. int slots;
  375. int frag_slots;
  376. slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
  377. frag_slots = count_skb_frag_slots(skb);
  378. return slots + frag_slots;
  379. }
  380. static u32 net_checksum_info(struct sk_buff *skb)
  381. {
  382. if (skb->protocol == htons(ETH_P_IP)) {
  383. struct iphdr *ip = ip_hdr(skb);
  384. if (ip->protocol == IPPROTO_TCP)
  385. return TRANSPORT_INFO_IPV4_TCP;
  386. else if (ip->protocol == IPPROTO_UDP)
  387. return TRANSPORT_INFO_IPV4_UDP;
  388. } else {
  389. struct ipv6hdr *ip6 = ipv6_hdr(skb);
  390. if (ip6->nexthdr == IPPROTO_TCP)
  391. return TRANSPORT_INFO_IPV6_TCP;
  392. else if (ip6->nexthdr == IPPROTO_UDP)
  393. return TRANSPORT_INFO_IPV6_UDP;
  394. }
  395. return TRANSPORT_INFO_NOT_IP;
  396. }
  397. /* Send skb on the slave VF device. */
  398. static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
  399. struct sk_buff *skb)
  400. {
  401. struct net_device_context *ndev_ctx = netdev_priv(net);
  402. unsigned int len = skb->len;
  403. int rc;
  404. skb->dev = vf_netdev;
  405. skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
  406. rc = dev_queue_xmit(skb);
  407. if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
  408. struct netvsc_vf_pcpu_stats *pcpu_stats
  409. = this_cpu_ptr(ndev_ctx->vf_stats);
  410. u64_stats_update_begin(&pcpu_stats->syncp);
  411. pcpu_stats->tx_packets++;
  412. pcpu_stats->tx_bytes += len;
  413. u64_stats_update_end(&pcpu_stats->syncp);
  414. } else {
  415. this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
  416. }
  417. return rc;
  418. }
  419. static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
  420. {
  421. struct net_device_context *net_device_ctx = netdev_priv(net);
  422. struct hv_netvsc_packet *packet = NULL;
  423. int ret;
  424. unsigned int num_data_pgs;
  425. struct rndis_message *rndis_msg;
  426. struct net_device *vf_netdev;
  427. u32 rndis_msg_size;
  428. u32 hash;
  429. struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];
  430. /* if VF is present and up then redirect packets
  431. * already called with rcu_read_lock_bh
  432. */
  433. vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
  434. if (vf_netdev && netif_running(vf_netdev) &&
  435. !netpoll_tx_running(net))
  436. return netvsc_vf_xmit(net, vf_netdev, skb);
  437. /* We will atmost need two pages to describe the rndis
  438. * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
  439. * of pages in a single packet. If skb is scattered around
  440. * more pages we try linearizing it.
  441. */
  442. num_data_pgs = netvsc_get_slots(skb) + 2;
  443. if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
  444. ++net_device_ctx->eth_stats.tx_scattered;
  445. if (skb_linearize(skb))
  446. goto no_memory;
  447. num_data_pgs = netvsc_get_slots(skb) + 2;
  448. if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
  449. ++net_device_ctx->eth_stats.tx_too_big;
  450. goto drop;
  451. }
  452. }
  453. /*
  454. * Place the rndis header in the skb head room and
  455. * the skb->cb will be used for hv_netvsc_packet
  456. * structure.
  457. */
  458. ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
  459. if (ret)
  460. goto no_memory;
  461. /* Use the skb control buffer for building up the packet */
  462. BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
  463. FIELD_SIZEOF(struct sk_buff, cb));
  464. packet = (struct hv_netvsc_packet *)skb->cb;
  465. packet->q_idx = skb_get_queue_mapping(skb);
  466. packet->total_data_buflen = skb->len;
  467. packet->total_bytes = skb->len;
  468. packet->total_packets = 1;
  469. rndis_msg = (struct rndis_message *)skb->head;
  470. /* Add the rndis header */
  471. rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
  472. rndis_msg->msg_len = packet->total_data_buflen;
  473. rndis_msg->msg.pkt = (struct rndis_packet) {
  474. .data_offset = sizeof(struct rndis_packet),
  475. .data_len = packet->total_data_buflen,
  476. .per_pkt_info_offset = sizeof(struct rndis_packet),
  477. };
  478. rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
  479. hash = skb_get_hash_raw(skb);
  480. if (hash != 0 && net->real_num_tx_queues > 1) {
  481. u32 *hash_info;
  482. rndis_msg_size += NDIS_HASH_PPI_SIZE;
  483. hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
  484. NBL_HASH_VALUE);
  485. *hash_info = hash;
  486. }
  487. if (skb_vlan_tag_present(skb)) {
  488. struct ndis_pkt_8021q_info *vlan;
  489. rndis_msg_size += NDIS_VLAN_PPI_SIZE;
  490. vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
  491. IEEE_8021Q_INFO);
  492. vlan->value = 0;
  493. vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
  494. vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
  495. VLAN_PRIO_SHIFT;
  496. }
  497. if (skb_is_gso(skb)) {
  498. struct ndis_tcp_lso_info *lso_info;
  499. rndis_msg_size += NDIS_LSO_PPI_SIZE;
  500. lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
  501. TCP_LARGESEND_PKTINFO);
  502. lso_info->value = 0;
  503. lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
  504. if (skb->protocol == htons(ETH_P_IP)) {
  505. lso_info->lso_v2_transmit.ip_version =
  506. NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
  507. ip_hdr(skb)->tot_len = 0;
  508. ip_hdr(skb)->check = 0;
  509. tcp_hdr(skb)->check =
  510. ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
  511. ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
  512. } else {
  513. lso_info->lso_v2_transmit.ip_version =
  514. NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
  515. ipv6_hdr(skb)->payload_len = 0;
  516. tcp_hdr(skb)->check =
  517. ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
  518. &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
  519. }
  520. lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
  521. lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
  522. } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
  523. if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
  524. struct ndis_tcp_ip_checksum_info *csum_info;
  525. rndis_msg_size += NDIS_CSUM_PPI_SIZE;
  526. csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
  527. TCPIP_CHKSUM_PKTINFO);
  528. csum_info->value = 0;
  529. csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
  530. if (skb->protocol == htons(ETH_P_IP)) {
  531. csum_info->transmit.is_ipv4 = 1;
  532. if (ip_hdr(skb)->protocol == IPPROTO_TCP)
  533. csum_info->transmit.tcp_checksum = 1;
  534. else
  535. csum_info->transmit.udp_checksum = 1;
  536. } else {
  537. csum_info->transmit.is_ipv6 = 1;
  538. if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
  539. csum_info->transmit.tcp_checksum = 1;
  540. else
  541. csum_info->transmit.udp_checksum = 1;
  542. }
  543. } else {
  544. /* Can't do offload of this type of checksum */
  545. if (skb_checksum_help(skb))
  546. goto drop;
  547. }
  548. }
  549. /* Start filling in the page buffers with the rndis hdr */
  550. rndis_msg->msg_len += rndis_msg_size;
  551. packet->total_data_buflen = rndis_msg->msg_len;
  552. packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
  553. skb, packet, pb);
  554. /* timestamp packet in software */
  555. skb_tx_timestamp(skb);
  556. ret = netvsc_send(net, packet, rndis_msg, pb, skb);
  557. if (likely(ret == 0))
  558. return NETDEV_TX_OK;
  559. if (ret == -EAGAIN) {
  560. ++net_device_ctx->eth_stats.tx_busy;
  561. return NETDEV_TX_BUSY;
  562. }
  563. if (ret == -ENOSPC)
  564. ++net_device_ctx->eth_stats.tx_no_space;
  565. drop:
  566. dev_kfree_skb_any(skb);
  567. net->stats.tx_dropped++;
  568. return NETDEV_TX_OK;
  569. no_memory:
  570. ++net_device_ctx->eth_stats.tx_no_memory;
  571. goto drop;
  572. }
  573. /*
  574. * netvsc_linkstatus_callback - Link up/down notification
  575. */
  576. void netvsc_linkstatus_callback(struct net_device *net,
  577. struct rndis_message *resp)
  578. {
  579. struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
  580. struct net_device_context *ndev_ctx = netdev_priv(net);
  581. struct netvsc_reconfig *event;
  582. unsigned long flags;
  583. /* Update the physical link speed when changing to another vSwitch */
  584. if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
  585. u32 speed;
  586. speed = *(u32 *)((void *)indicate
  587. + indicate->status_buf_offset) / 10000;
  588. ndev_ctx->speed = speed;
  589. return;
  590. }
  591. /* Handle these link change statuses below */
  592. if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
  593. indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
  594. indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
  595. return;
  596. if (net->reg_state != NETREG_REGISTERED)
  597. return;
  598. event = kzalloc(sizeof(*event), GFP_ATOMIC);
  599. if (!event)
  600. return;
  601. event->event = indicate->status;
  602. spin_lock_irqsave(&ndev_ctx->lock, flags);
  603. list_add_tail(&event->list, &ndev_ctx->reconfig_events);
  604. spin_unlock_irqrestore(&ndev_ctx->lock, flags);
  605. schedule_delayed_work(&ndev_ctx->dwork, 0);
  606. }
  607. static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
  608. struct napi_struct *napi,
  609. const struct ndis_tcp_ip_checksum_info *csum_info,
  610. const struct ndis_pkt_8021q_info *vlan,
  611. void *data, u32 buflen)
  612. {
  613. struct sk_buff *skb;
  614. skb = napi_alloc_skb(napi, buflen);
  615. if (!skb)
  616. return skb;
  617. /*
  618. * Copy to skb. This copy is needed here since the memory pointed by
  619. * hv_netvsc_packet cannot be deallocated
  620. */
  621. skb_put_data(skb, data, buflen);
  622. skb->protocol = eth_type_trans(skb, net);
  623. /* skb is already created with CHECKSUM_NONE */
  624. skb_checksum_none_assert(skb);
  625. /*
  626. * In Linux, the IP checksum is always checked.
  627. * Do L4 checksum offload if enabled and present.
  628. */
  629. if (csum_info && (net->features & NETIF_F_RXCSUM)) {
  630. if (csum_info->receive.tcp_checksum_succeeded ||
  631. csum_info->receive.udp_checksum_succeeded)
  632. skb->ip_summed = CHECKSUM_UNNECESSARY;
  633. }
  634. if (vlan) {
  635. u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
  636. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
  637. vlan_tci);
  638. }
  639. return skb;
  640. }
  641. /*
  642. * netvsc_recv_callback - Callback when we receive a packet from the
  643. * "wire" on the specified device.
  644. */
  645. int netvsc_recv_callback(struct net_device *net,
  646. struct netvsc_device *net_device,
  647. struct vmbus_channel *channel,
  648. void *data, u32 len,
  649. const struct ndis_tcp_ip_checksum_info *csum_info,
  650. const struct ndis_pkt_8021q_info *vlan)
  651. {
  652. struct net_device_context *net_device_ctx = netdev_priv(net);
  653. u16 q_idx = channel->offermsg.offer.sub_channel_index;
  654. struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
  655. struct sk_buff *skb;
  656. struct netvsc_stats *rx_stats;
  657. if (net->reg_state != NETREG_REGISTERED)
  658. return NVSP_STAT_FAIL;
  659. /* Allocate a skb - TODO direct I/O to pages? */
  660. skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
  661. csum_info, vlan, data, len);
  662. if (unlikely(!skb)) {
  663. ++net_device_ctx->eth_stats.rx_no_memory;
  664. rcu_read_unlock();
  665. return NVSP_STAT_FAIL;
  666. }
  667. skb_record_rx_queue(skb, q_idx);
  668. /*
  669. * Even if injecting the packet, record the statistics
  670. * on the synthetic device because modifying the VF device
  671. * statistics will not work correctly.
  672. */
  673. rx_stats = &nvchan->rx_stats;
  674. u64_stats_update_begin(&rx_stats->syncp);
  675. rx_stats->packets++;
  676. rx_stats->bytes += len;
  677. if (skb->pkt_type == PACKET_BROADCAST)
  678. ++rx_stats->broadcast;
  679. else if (skb->pkt_type == PACKET_MULTICAST)
  680. ++rx_stats->multicast;
  681. u64_stats_update_end(&rx_stats->syncp);
  682. napi_gro_receive(&nvchan->napi, skb);
  683. return NVSP_STAT_SUCCESS;
  684. }
  685. static void netvsc_get_drvinfo(struct net_device *net,
  686. struct ethtool_drvinfo *info)
  687. {
  688. strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
  689. strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
  690. }
  691. static void netvsc_get_channels(struct net_device *net,
  692. struct ethtool_channels *channel)
  693. {
  694. struct net_device_context *net_device_ctx = netdev_priv(net);
  695. struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
  696. if (nvdev) {
  697. channel->max_combined = nvdev->max_chn;
  698. channel->combined_count = nvdev->num_chn;
  699. }
  700. }
  701. static int netvsc_detach(struct net_device *ndev,
  702. struct netvsc_device *nvdev)
  703. {
  704. struct net_device_context *ndev_ctx = netdev_priv(ndev);
  705. struct hv_device *hdev = ndev_ctx->device_ctx;
  706. int ret;
  707. /* Don't try continuing to try and setup sub channels */
  708. if (cancel_work_sync(&nvdev->subchan_work))
  709. nvdev->num_chn = 1;
  710. /* If device was up (receiving) then shutdown */
  711. if (netif_running(ndev)) {
  712. netif_tx_disable(ndev);
  713. ret = rndis_filter_close(nvdev);
  714. if (ret) {
  715. netdev_err(ndev,
  716. "unable to close device (ret %d).\n", ret);
  717. return ret;
  718. }
  719. ret = netvsc_wait_until_empty(nvdev);
  720. if (ret) {
  721. netdev_err(ndev,
  722. "Ring buffer not empty after closing rndis\n");
  723. return ret;
  724. }
  725. }
  726. netif_device_detach(ndev);
  727. rndis_filter_device_remove(hdev, nvdev);
  728. return 0;
  729. }
  730. static int netvsc_attach(struct net_device *ndev,
  731. struct netvsc_device_info *dev_info)
  732. {
  733. struct net_device_context *ndev_ctx = netdev_priv(ndev);
  734. struct hv_device *hdev = ndev_ctx->device_ctx;
  735. struct netvsc_device *nvdev;
  736. struct rndis_device *rdev;
  737. int ret;
  738. nvdev = rndis_filter_device_add(hdev, dev_info);
  739. if (IS_ERR(nvdev))
  740. return PTR_ERR(nvdev);
  741. /* Note: enable and attach happen when sub-channels setup */
  742. netif_carrier_off(ndev);
  743. if (netif_running(ndev)) {
  744. ret = rndis_filter_open(nvdev);
  745. if (ret)
  746. return ret;
  747. rdev = nvdev->extension;
  748. if (!rdev->link_state)
  749. netif_carrier_on(ndev);
  750. }
  751. return 0;
  752. }
  753. static int netvsc_set_channels(struct net_device *net,
  754. struct ethtool_channels *channels)
  755. {
  756. struct net_device_context *net_device_ctx = netdev_priv(net);
  757. struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
  758. unsigned int orig, count = channels->combined_count;
  759. struct netvsc_device_info device_info;
  760. int ret;
  761. /* We do not support separate count for rx, tx, or other */
  762. if (count == 0 ||
  763. channels->rx_count || channels->tx_count || channels->other_count)
  764. return -EINVAL;
  765. if (!nvdev || nvdev->destroy)
  766. return -ENODEV;
  767. if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
  768. return -EINVAL;
  769. if (count > nvdev->max_chn)
  770. return -EINVAL;
  771. orig = nvdev->num_chn;
  772. memset(&device_info, 0, sizeof(device_info));
  773. device_info.num_chn = count;
  774. device_info.send_sections = nvdev->send_section_cnt;
  775. device_info.send_section_size = nvdev->send_section_size;
  776. device_info.recv_sections = nvdev->recv_section_cnt;
  777. device_info.recv_section_size = nvdev->recv_section_size;
  778. ret = netvsc_detach(net, nvdev);
  779. if (ret)
  780. return ret;
  781. ret = netvsc_attach(net, &device_info);
  782. if (ret) {
  783. device_info.num_chn = orig;
  784. if (netvsc_attach(net, &device_info))
  785. netdev_err(net, "restoring channel setting failed\n");
  786. }
  787. return ret;
  788. }
  789. static bool
  790. netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
  791. {
  792. struct ethtool_link_ksettings diff1 = *cmd;
  793. struct ethtool_link_ksettings diff2 = {};
  794. diff1.base.speed = 0;
  795. diff1.base.duplex = 0;
  796. /* advertising and cmd are usually set */
  797. ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
  798. diff1.base.cmd = 0;
  799. /* We set port to PORT_OTHER */
  800. diff2.base.port = PORT_OTHER;
  801. return !memcmp(&diff1, &diff2, sizeof(diff1));
  802. }
  803. static void netvsc_init_settings(struct net_device *dev)
  804. {
  805. struct net_device_context *ndc = netdev_priv(dev);
  806. ndc->l4_hash = HV_DEFAULT_L4HASH;
  807. ndc->speed = SPEED_UNKNOWN;
  808. ndc->duplex = DUPLEX_FULL;
  809. }
  810. static int netvsc_get_link_ksettings(struct net_device *dev,
  811. struct ethtool_link_ksettings *cmd)
  812. {
  813. struct net_device_context *ndc = netdev_priv(dev);
  814. cmd->base.speed = ndc->speed;
  815. cmd->base.duplex = ndc->duplex;
  816. cmd->base.port = PORT_OTHER;
  817. return 0;
  818. }
  819. static int netvsc_set_link_ksettings(struct net_device *dev,
  820. const struct ethtool_link_ksettings *cmd)
  821. {
  822. struct net_device_context *ndc = netdev_priv(dev);
  823. u32 speed;
  824. speed = cmd->base.speed;
  825. if (!ethtool_validate_speed(speed) ||
  826. !ethtool_validate_duplex(cmd->base.duplex) ||
  827. !netvsc_validate_ethtool_ss_cmd(cmd))
  828. return -EINVAL;
  829. ndc->speed = speed;
  830. ndc->duplex = cmd->base.duplex;
  831. return 0;
  832. }
  833. static int netvsc_change_mtu(struct net_device *ndev, int mtu)
  834. {
  835. struct net_device_context *ndevctx = netdev_priv(ndev);
  836. struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
  837. struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
  838. int orig_mtu = ndev->mtu;
  839. struct netvsc_device_info device_info;
  840. int ret = 0;
  841. if (!nvdev || nvdev->destroy)
  842. return -ENODEV;
  843. /* Change MTU of underlying VF netdev first. */
  844. if (vf_netdev) {
  845. ret = dev_set_mtu(vf_netdev, mtu);
  846. if (ret)
  847. return ret;
  848. }
  849. memset(&device_info, 0, sizeof(device_info));
  850. device_info.num_chn = nvdev->num_chn;
  851. device_info.send_sections = nvdev->send_section_cnt;
  852. device_info.send_section_size = nvdev->send_section_size;
  853. device_info.recv_sections = nvdev->recv_section_cnt;
  854. device_info.recv_section_size = nvdev->recv_section_size;
  855. ret = netvsc_detach(ndev, nvdev);
  856. if (ret)
  857. goto rollback_vf;
  858. ndev->mtu = mtu;
  859. ret = netvsc_attach(ndev, &device_info);
  860. if (ret)
  861. goto rollback;
  862. return 0;
  863. rollback:
  864. /* Attempt rollback to original MTU */
  865. ndev->mtu = orig_mtu;
  866. if (netvsc_attach(ndev, &device_info))
  867. netdev_err(ndev, "restoring mtu failed\n");
  868. rollback_vf:
  869. if (vf_netdev)
  870. dev_set_mtu(vf_netdev, orig_mtu);
  871. return ret;
  872. }
  873. static void netvsc_get_vf_stats(struct net_device *net,
  874. struct netvsc_vf_pcpu_stats *tot)
  875. {
  876. struct net_device_context *ndev_ctx = netdev_priv(net);
  877. int i;
  878. memset(tot, 0, sizeof(*tot));
  879. for_each_possible_cpu(i) {
  880. const struct netvsc_vf_pcpu_stats *stats
  881. = per_cpu_ptr(ndev_ctx->vf_stats, i);
  882. u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
  883. unsigned int start;
  884. do {
  885. start = u64_stats_fetch_begin_irq(&stats->syncp);
  886. rx_packets = stats->rx_packets;
  887. tx_packets = stats->tx_packets;
  888. rx_bytes = stats->rx_bytes;
  889. tx_bytes = stats->tx_bytes;
  890. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  891. tot->rx_packets += rx_packets;
  892. tot->tx_packets += tx_packets;
  893. tot->rx_bytes += rx_bytes;
  894. tot->tx_bytes += tx_bytes;
  895. tot->tx_dropped += stats->tx_dropped;
  896. }
  897. }
  898. static void netvsc_get_stats64(struct net_device *net,
  899. struct rtnl_link_stats64 *t)
  900. {
  901. struct net_device_context *ndev_ctx = netdev_priv(net);
  902. struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
  903. struct netvsc_vf_pcpu_stats vf_tot;
  904. int i;
  905. if (!nvdev)
  906. return;
  907. netdev_stats_to_stats64(t, &net->stats);
  908. netvsc_get_vf_stats(net, &vf_tot);
  909. t->rx_packets += vf_tot.rx_packets;
  910. t->tx_packets += vf_tot.tx_packets;
  911. t->rx_bytes += vf_tot.rx_bytes;
  912. t->tx_bytes += vf_tot.tx_bytes;
  913. t->tx_dropped += vf_tot.tx_dropped;
  914. for (i = 0; i < nvdev->num_chn; i++) {
  915. const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
  916. const struct netvsc_stats *stats;
  917. u64 packets, bytes, multicast;
  918. unsigned int start;
  919. stats = &nvchan->tx_stats;
  920. do {
  921. start = u64_stats_fetch_begin_irq(&stats->syncp);
  922. packets = stats->packets;
  923. bytes = stats->bytes;
  924. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  925. t->tx_bytes += bytes;
  926. t->tx_packets += packets;
  927. stats = &nvchan->rx_stats;
  928. do {
  929. start = u64_stats_fetch_begin_irq(&stats->syncp);
  930. packets = stats->packets;
  931. bytes = stats->bytes;
  932. multicast = stats->multicast + stats->broadcast;
  933. } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
  934. t->rx_bytes += bytes;
  935. t->rx_packets += packets;
  936. t->multicast += multicast;
  937. }
  938. }
  939. static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
  940. {
  941. struct net_device_context *ndc = netdev_priv(ndev);
  942. struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
  943. struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
  944. struct sockaddr *addr = p;
  945. int err;
  946. err = eth_prepare_mac_addr_change(ndev, p);
  947. if (err)
  948. return err;
  949. if (!nvdev)
  950. return -ENODEV;
  951. if (vf_netdev) {
  952. err = dev_set_mac_address(vf_netdev, addr);
  953. if (err)
  954. return err;
  955. }
  956. err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
  957. if (!err) {
  958. eth_commit_mac_addr_change(ndev, p);
  959. } else if (vf_netdev) {
  960. /* rollback change on VF */
  961. memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
  962. dev_set_mac_address(vf_netdev, addr);
  963. }
  964. return err;
  965. }
  966. static const struct {
  967. char name[ETH_GSTRING_LEN];
  968. u16 offset;
  969. } netvsc_stats[] = {
  970. { "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
  971. { "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
  972. { "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
  973. { "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
  974. { "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
  975. { "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
  976. { "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
  977. { "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
  978. { "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
  979. { "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
  980. }, vf_stats[] = {
  981. { "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
  982. { "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
  983. { "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
  984. { "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
  985. { "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
  986. };
  987. #define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
  988. #define NETVSC_VF_STATS_LEN ARRAY_SIZE(vf_stats)
  989. /* 4 statistics per queue (rx/tx packets/bytes) */
  990. #define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
  991. static int netvsc_get_sset_count(struct net_device *dev, int string_set)
  992. {
  993. struct net_device_context *ndc = netdev_priv(dev);
  994. struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
  995. if (!nvdev)
  996. return -ENODEV;
  997. switch (string_set) {
  998. case ETH_SS_STATS:
  999. return NETVSC_GLOBAL_STATS_LEN
  1000. + NETVSC_VF_STATS_LEN
  1001. + NETVSC_QUEUE_STATS_LEN(nvdev);
  1002. default:
  1003. return -EINVAL;
  1004. }
  1005. }
  1006. static void netvsc_get_ethtool_stats(struct net_device *dev,
  1007. struct ethtool_stats *stats, u64 *data)
  1008. {
  1009. struct net_device_context *ndc = netdev_priv(dev);
  1010. struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
  1011. const void *nds = &ndc->eth_stats;
  1012. const struct netvsc_stats *qstats;
  1013. struct netvsc_vf_pcpu_stats sum;
  1014. unsigned int start;
  1015. u64 packets, bytes;
  1016. int i, j;
  1017. if (!nvdev)
  1018. return;
  1019. for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
  1020. data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
  1021. netvsc_get_vf_stats(dev, &sum);
  1022. for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
  1023. data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);
  1024. for (j = 0; j < nvdev->num_chn; j++) {
  1025. qstats = &nvdev->chan_table[j].tx_stats;
  1026. do {
  1027. start = u64_stats_fetch_begin_irq(&qstats->syncp);
  1028. packets = qstats->packets;
  1029. bytes = qstats->bytes;
  1030. } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
  1031. data[i++] = packets;
  1032. data[i++] = bytes;
  1033. qstats = &nvdev->chan_table[j].rx_stats;
  1034. do {
  1035. start = u64_stats_fetch_begin_irq(&qstats->syncp);
  1036. packets = qstats->packets;
  1037. bytes = qstats->bytes;
  1038. } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
  1039. data[i++] = packets;
  1040. data[i++] = bytes;
  1041. }
  1042. }
  1043. static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
  1044. {
  1045. struct net_device_context *ndc = netdev_priv(dev);
  1046. struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
  1047. u8 *p = data;
  1048. int i;
  1049. if (!nvdev)
  1050. return;
  1051. switch (stringset) {
  1052. case ETH_SS_STATS:
  1053. for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
  1054. memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
  1055. p += ETH_GSTRING_LEN;
  1056. }
  1057. for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
  1058. memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
  1059. p += ETH_GSTRING_LEN;
  1060. }
  1061. for (i = 0; i < nvdev->num_chn; i++) {
  1062. sprintf(p, "tx_queue_%u_packets", i);
  1063. p += ETH_GSTRING_LEN;
  1064. sprintf(p, "tx_queue_%u_bytes", i);
  1065. p += ETH_GSTRING_LEN;
  1066. sprintf(p, "rx_queue_%u_packets", i);
  1067. p += ETH_GSTRING_LEN;
  1068. sprintf(p, "rx_queue_%u_bytes", i);
  1069. p += ETH_GSTRING_LEN;
  1070. }
  1071. break;
  1072. }
  1073. }
  1074. static int
  1075. netvsc_get_rss_hash_opts(struct net_device_context *ndc,
  1076. struct ethtool_rxnfc *info)
  1077. {
  1078. const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
  1079. info->data = RXH_IP_SRC | RXH_IP_DST;
  1080. switch (info->flow_type) {
  1081. case TCP_V4_FLOW:
  1082. if (ndc->l4_hash & HV_TCP4_L4HASH)
  1083. info->data |= l4_flag;
  1084. break;
  1085. case TCP_V6_FLOW:
  1086. if (ndc->l4_hash & HV_TCP6_L4HASH)
  1087. info->data |= l4_flag;
  1088. break;
  1089. case UDP_V4_FLOW:
  1090. if (ndc->l4_hash & HV_UDP4_L4HASH)
  1091. info->data |= l4_flag;
  1092. break;
  1093. case UDP_V6_FLOW:
  1094. if (ndc->l4_hash & HV_UDP6_L4HASH)
  1095. info->data |= l4_flag;
  1096. break;
  1097. case IPV4_FLOW:
  1098. case IPV6_FLOW:
  1099. break;
  1100. default:
  1101. info->data = 0;
  1102. break;
  1103. }
  1104. return 0;
  1105. }
  1106. static int
  1107. netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
  1108. u32 *rules)
  1109. {
  1110. struct net_device_context *ndc = netdev_priv(dev);
  1111. struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
  1112. if (!nvdev)
  1113. return -ENODEV;
  1114. switch (info->cmd) {
  1115. case ETHTOOL_GRXRINGS:
  1116. info->data = nvdev->num_chn;
  1117. return 0;
  1118. case ETHTOOL_GRXFH:
  1119. return netvsc_get_rss_hash_opts(ndc, info);
  1120. }
  1121. return -EOPNOTSUPP;
  1122. }
  1123. static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
  1124. struct ethtool_rxnfc *info)
  1125. {
  1126. if (info->data == (RXH_IP_SRC | RXH_IP_DST |
  1127. RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
  1128. switch (info->flow_type) {
  1129. case TCP_V4_FLOW:
  1130. ndc->l4_hash |= HV_TCP4_L4HASH;
  1131. break;
  1132. case TCP_V6_FLOW:
  1133. ndc->l4_hash |= HV_TCP6_L4HASH;
  1134. break;
  1135. case UDP_V4_FLOW:
  1136. ndc->l4_hash |= HV_UDP4_L4HASH;
  1137. break;
  1138. case UDP_V6_FLOW:
  1139. ndc->l4_hash |= HV_UDP6_L4HASH;
  1140. break;
  1141. default:
  1142. return -EOPNOTSUPP;
  1143. }
  1144. return 0;
  1145. }
  1146. if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
  1147. switch (info->flow_type) {
  1148. case TCP_V4_FLOW:
  1149. ndc->l4_hash &= ~HV_TCP4_L4HASH;
  1150. break;
  1151. case TCP_V6_FLOW:
  1152. ndc->l4_hash &= ~HV_TCP6_L4HASH;
  1153. break;
  1154. case UDP_V4_FLOW:
  1155. ndc->l4_hash &= ~HV_UDP4_L4HASH;
  1156. break;
  1157. case UDP_V6_FLOW:
  1158. ndc->l4_hash &= ~HV_UDP6_L4HASH;
  1159. break;
  1160. default:
  1161. return -EOPNOTSUPP;
  1162. }
  1163. return 0;
  1164. }
  1165. return -EOPNOTSUPP;
  1166. }
  1167. static int
  1168. netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
  1169. {
  1170. struct net_device_context *ndc = netdev_priv(ndev);
  1171. if (info->cmd == ETHTOOL_SRXFH)
  1172. return netvsc_set_rss_hash_opts(ndc, info);
  1173. return -EOPNOTSUPP;
  1174. }
  1175. #ifdef CONFIG_NET_POLL_CONTROLLER
  1176. static void netvsc_poll_controller(struct net_device *dev)
  1177. {
  1178. struct net_device_context *ndc = netdev_priv(dev);
  1179. struct netvsc_device *ndev;
  1180. int i;
  1181. rcu_read_lock();
  1182. ndev = rcu_dereference(ndc->nvdev);
  1183. if (ndev) {
  1184. for (i = 0; i < ndev->num_chn; i++) {
  1185. struct netvsc_channel *nvchan = &ndev->chan_table[i];
  1186. napi_schedule(&nvchan->napi);
  1187. }
  1188. }
  1189. rcu_read_unlock();
  1190. }
  1191. #endif
  1192. static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
  1193. {
  1194. return NETVSC_HASH_KEYLEN;
  1195. }
  1196. static u32 netvsc_rss_indir_size(struct net_device *dev)
  1197. {
  1198. return ITAB_NUM;
  1199. }
  1200. static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
  1201. u8 *hfunc)
  1202. {
  1203. struct net_device_context *ndc = netdev_priv(dev);
  1204. struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
  1205. struct rndis_device *rndis_dev;
  1206. int i;
  1207. if (!ndev)
  1208. return -ENODEV;
  1209. if (hfunc)
  1210. *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
  1211. rndis_dev = ndev->extension;
  1212. if (indir) {
  1213. for (i = 0; i < ITAB_NUM; i++)
  1214. indir[i] = rndis_dev->rx_table[i];
  1215. }
  1216. if (key)
  1217. memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
  1218. return 0;
  1219. }
  1220. static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
  1221. const u8 *key, const u8 hfunc)
  1222. {
  1223. struct net_device_context *ndc = netdev_priv(dev);
  1224. struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
  1225. struct rndis_device *rndis_dev;
  1226. int i;
  1227. if (!ndev)
  1228. return -ENODEV;
  1229. if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
  1230. return -EOPNOTSUPP;
  1231. rndis_dev = ndev->extension;
  1232. if (indir) {
  1233. for (i = 0; i < ITAB_NUM; i++)
  1234. if (indir[i] >= ndev->num_chn)
  1235. return -EINVAL;
  1236. for (i = 0; i < ITAB_NUM; i++)
  1237. rndis_dev->rx_table[i] = indir[i];
  1238. }
  1239. if (!key) {
  1240. if (!indir)
  1241. return 0;
  1242. key = rndis_dev->rss_key;
  1243. }
  1244. return rndis_filter_set_rss_param(rndis_dev, key);
  1245. }
  1246. /* Hyper-V RNDIS protocol does not have ring in the HW sense.
  1247. * It does have pre-allocated receive area which is divided into sections.
  1248. */
  1249. static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
  1250. struct ethtool_ringparam *ring)
  1251. {
  1252. u32 max_buf_size;
  1253. ring->rx_pending = nvdev->recv_section_cnt;
  1254. ring->tx_pending = nvdev->send_section_cnt;
  1255. if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
  1256. max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
  1257. else
  1258. max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
  1259. ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
  1260. ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
  1261. / nvdev->send_section_size;
  1262. }
  1263. static void netvsc_get_ringparam(struct net_device *ndev,
  1264. struct ethtool_ringparam *ring)
  1265. {
  1266. struct net_device_context *ndevctx = netdev_priv(ndev);
  1267. struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
  1268. if (!nvdev)
  1269. return;
  1270. __netvsc_get_ringparam(nvdev, ring);
  1271. }
  1272. static int netvsc_set_ringparam(struct net_device *ndev,
  1273. struct ethtool_ringparam *ring)
  1274. {
  1275. struct net_device_context *ndevctx = netdev_priv(ndev);
  1276. struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
  1277. struct netvsc_device_info device_info;
  1278. struct ethtool_ringparam orig;
  1279. u32 new_tx, new_rx;
  1280. int ret = 0;
  1281. if (!nvdev || nvdev->destroy)
  1282. return -ENODEV;
  1283. memset(&orig, 0, sizeof(orig));
  1284. __netvsc_get_ringparam(nvdev, &orig);
  1285. new_tx = clamp_t(u32, ring->tx_pending,
  1286. NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
  1287. new_rx = clamp_t(u32, ring->rx_pending,
  1288. NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
  1289. if (new_tx == orig.tx_pending &&
  1290. new_rx == orig.rx_pending)
  1291. return 0; /* no change */
  1292. memset(&device_info, 0, sizeof(device_info));
  1293. device_info.num_chn = nvdev->num_chn;
  1294. device_info.send_sections = new_tx;
  1295. device_info.send_section_size = nvdev->send_section_size;
  1296. device_info.recv_sections = new_rx;
  1297. device_info.recv_section_size = nvdev->recv_section_size;
  1298. ret = netvsc_detach(ndev, nvdev);
  1299. if (ret)
  1300. return ret;
  1301. ret = netvsc_attach(ndev, &device_info);
  1302. if (ret) {
  1303. device_info.send_sections = orig.tx_pending;
  1304. device_info.recv_sections = orig.rx_pending;
  1305. if (netvsc_attach(ndev, &device_info))
  1306. netdev_err(ndev, "restoring ringparam failed");
  1307. }
  1308. return ret;
  1309. }
  1310. static u32 netvsc_get_msglevel(struct net_device *ndev)
  1311. {
  1312. struct net_device_context *ndev_ctx = netdev_priv(ndev);
  1313. return ndev_ctx->msg_enable;
  1314. }
  1315. static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
  1316. {
  1317. struct net_device_context *ndev_ctx = netdev_priv(ndev);
  1318. ndev_ctx->msg_enable = val;
  1319. }
  1320. static const struct ethtool_ops ethtool_ops = {
  1321. .get_drvinfo = netvsc_get_drvinfo,
  1322. .get_msglevel = netvsc_get_msglevel,
  1323. .set_msglevel = netvsc_set_msglevel,
  1324. .get_link = ethtool_op_get_link,
  1325. .get_ethtool_stats = netvsc_get_ethtool_stats,
  1326. .get_sset_count = netvsc_get_sset_count,
  1327. .get_strings = netvsc_get_strings,
  1328. .get_channels = netvsc_get_channels,
  1329. .set_channels = netvsc_set_channels,
  1330. .get_ts_info = ethtool_op_get_ts_info,
  1331. .get_rxnfc = netvsc_get_rxnfc,
  1332. .set_rxnfc = netvsc_set_rxnfc,
  1333. .get_rxfh_key_size = netvsc_get_rxfh_key_size,
  1334. .get_rxfh_indir_size = netvsc_rss_indir_size,
  1335. .get_rxfh = netvsc_get_rxfh,
  1336. .set_rxfh = netvsc_set_rxfh,
  1337. .get_link_ksettings = netvsc_get_link_ksettings,
  1338. .set_link_ksettings = netvsc_set_link_ksettings,
  1339. .get_ringparam = netvsc_get_ringparam,
  1340. .set_ringparam = netvsc_set_ringparam,
  1341. };
static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_change_rx_flags =		netvsc_change_rx_flags,
	.ndo_set_rx_mode =		netvsc_set_rx_mode,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		 = this_cpu_ptr(ndev_ctx->vf_stats);

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}

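/* Mirror the synthetic device's configuration onto the VF: match its
 * MTU, copy its rx-mode flags, sync the unicast/multicast address
 * lists, and open the VF if the synthetic device is already running.
 */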
static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	/* set multicast etc flags on VF */
	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);

	/* sync address list from ndev to VF */
	netif_addr_lock_bh(ndev);
	dev_uc_sync(vf_netdev, ndev);
	dev_mc_sync(vf_netdev, ndev);
	netif_addr_unlock_bh(ndev);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

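/* Refuse the pairing if the synthetic device is gone or if another VF
 * has already been registered against it.
 */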
static int netvsc_pre_register_vf(struct net_device *vf_netdev,
				  struct net_device *ndev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return -ENODEV;

	return 0;
}

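/* Take a reference on the VF and remember it; the actual takeover
 * (MTU/flags/address sync) is deferred to the vf_takeover work so it
 * does not run from inside the netdev notifier chain.
 */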
static int netvsc_register_vf(struct net_device *vf_netdev,
			      struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);

	dev_hold(vf_netdev);
	rcu_assign_pointer(ndev_ctx->vf_netdev, vf_netdev);

	return 0;
}

/* VF up/down change detected, schedule to change data path */
static int netvsc_vf_changed(struct net_device *vf_netdev,
			     struct net_device *ndev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	bool vf_is_up = netif_running(vf_netdev);

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return -ENODEV;

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return 0;
}

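/* Make sure no takeover work is still in flight before the VF is
 * unregistered.
 */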
static int netvsc_pre_unregister_vf(struct net_device *vf_netdev,
				    struct net_device *ndev)
{
	struct net_device_context *net_device_ctx;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	return 0;
}

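/* Clear the RCU-protected VF pointer and drop the reference taken in
 * netvsc_register_vf().
 */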
static int netvsc_unregister_vf(struct net_device *vf_netdev,
				struct net_device *ndev)
{
	struct net_device_context *net_device_ctx;

	net_device_ctx = netdev_priv(ndev);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return 0;
}

static struct failover_ops netvsc_failover_ops = {
	.slave_pre_register	= netvsc_pre_register_vf,
	.slave_register		= netvsc_register_vf,
	.slave_pre_unregister	= netvsc_pre_unregister_vf,
	.slave_unregister	= netvsc_unregister_vf,
	.slave_link_change	= netvsc_vf_changed,
	.slave_handle_frame	= netvsc_vf_handle_frame,
};

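/* Bring up the synthetic NIC for a new vmbus device: allocate the
 * netdev, create the RNDIS device, register with the network core and
 * finally register with the failover module so a matching VF can be
 * paired with it later.
 */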
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info device_info;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to be 1, we may change it if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
	device_info.send_sections = NETVSC_DEFAULT_TX;
	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
	device_info.recv_sections = NETVSC_DEFAULT_RX;
	device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;

	nvdev = rndis_filter_device_add(dev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

	/* hw_features computed in rndis_netdev_set_hwcaps() */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdev(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	net_device_ctx->failover = failover_register(net, &netvsc_failover_ops);
	if (IS_ERR(net_device_ctx->failover)) {
		ret = PTR_ERR(net_device_ctx->failover);
		goto err_failover;
	}

	return ret;

err_failover:
	unregister_netdev(net);
register_failed:
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}

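/* Tear everything down in roughly the reverse order of netvsc_probe():
 * stop the link-change work, unregister the paired VF and the RNDIS
 * device, then release the netdev and per-cpu VF stats.
 */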
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev, *net;
	struct netvsc_device *nvdev;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rcu_read_lock();
	nvdev = rcu_dereference(ndev_ctx->nvdev);

	if (nvdev)
		cancel_work_sync(&nvdev->subchan_work);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	rtnl_lock();
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		failover_slave_unregister(vf_netdev);

	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);

	unregister_netdevice(net);

	failover_unregister(ndev_ctx->failover);

	rtnl_unlock();
	rcu_read_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
};

static void __exit netvsc_drv_exit(void)
{
	vmbus_driver_unregister(&netvsc_drv);
}

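/* Clamp the ring_size module parameter to the supported minimum before
 * sizing the vmbus ring buffer and registering the driver.
 */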
static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %u (min allowed)\n",
			ring_size);
	}
	netvsc_ring_bytes = ring_size * PAGE_SIZE;

	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);