xen-netfront.c
/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <net/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

/* Module parameters */
#define MAX_QUEUES_DEFAULT 8
static unsigned int xennet_max_queues;
module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

static const struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	int pull_to;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

/* Minimum number of Rx slots (includes slot for GSO metadata). */
#define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 6)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)

struct netfront_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

struct netfront_info;

struct netfront_queue {
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct netfront_info *info;

	struct napi_struct napi;

	/* Split event channels support, tx_* == rx_* when using
	 * single event channel.
	 */
	unsigned int tx_evtchn, rx_evtchn;
	unsigned int tx_irq, rx_irq;
	/* Only used when split event channels support is enabled */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */

	spinlock_t tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	struct page *grant_tx_page[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
};

struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct xenbus_device *xbdev;

	/* Multi-queue support */
	struct netfront_queue *queues;

	/* Statistics */
	struct netfront_stats __percpu *rx_stats;
	struct netfront_stats __percpu *tx_stats;

	atomic_t rx_gso_checksum_fixup;
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return (unsigned long)list->skb < PAGE_OFFSET;
}

/*
 * Access macros for acquiring freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = queue->rx_skbs[i];
	queue->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = queue->grant_rx_ref[i];
	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static const struct attribute_group xennet_dev_group;
#endif

static bool xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}

static void rx_refill_timeout(unsigned long data)
{
	struct netfront_queue *queue = (struct netfront_queue *)data;
	napi_schedule(&queue->napi);
}

static int netfront_tx_slot_available(struct netfront_queue *queue)
{
	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
}

static void xennet_maybe_wake_tx(struct netfront_queue *queue)
{
	struct net_device *dev = queue->info->netdev;
	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);

	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
	    netfront_tx_slot_available(queue) &&
	    likely(netif_running(dev)))
		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
}

static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	struct page *page;

	skb = __netdev_alloc_skb(queue->info->netdev,
				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
	if (!page) {
		kfree_skb(skb);
		return NULL;
	}
	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);

	/* Align ip header to a 16 bytes boundary */
	skb_reserve(skb, NET_IP_ALIGN);
	skb->dev = queue->info->netdev;

	return skb;
}
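/* Descriptive note (added): refill the rx ring by allocating an skb and page
 * per free slot, granting the backend access to each page, and notifying the
 * backend when needed. On allocation failure or too few filled slots, a
 * retry is scheduled via rx_refill_timer.
 */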
static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
{
	RING_IDX req_prod = queue->rx.req_prod_pvt;
	int notify;
	int err = 0;

	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
		return;

	for (req_prod = queue->rx.req_prod_pvt;
	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
	     req_prod++) {
		struct sk_buff *skb;
		unsigned short id;
		grant_ref_t ref;
		struct page *page;
		struct xen_netif_rx_request *req;

		skb = xennet_alloc_one_rx_buffer(queue);
		if (!skb) {
			err = -ENOMEM;
			break;
		}

		id = xennet_rxidx(req_prod);

		BUG_ON(queue->rx_skbs[id]);
		queue->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
		WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
		queue->grant_rx_ref[id] = ref;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		req = RING_GET_REQUEST(&queue->rx, req_prod);
		gnttab_page_grant_foreign_access_ref_one(ref,
							 queue->info->xbdev->otherend_id,
							 page,
							 0);
		req->id = id;
		req->gref = ref;
	}

	queue->rx.req_prod_pvt = req_prod;

	/* Try again later if there are not enough requests or skb allocation
	 * failed.
	 * Enough requests is quantified as the sum of newly created slots and
	 * the unconsumed slots at the backend.
	 */
	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
	    unlikely(err)) {
		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
		return;
	}

	wmb(); /* barrier so backend sees requests */

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);
}
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i = 0;
	struct netfront_queue *queue = NULL;

	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_enable(&queue->napi);

		spin_lock_bh(&queue->rx_lock);
		if (netif_carrier_ok(dev)) {
			xennet_alloc_rx_buffers(queue);
			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
				napi_schedule(&queue->napi);
		}
		spin_unlock_bh(&queue->rx_lock);
	}

	netif_tx_start_all_queues(dev);

	return 0;
}
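/* Descriptive note (added): reclaim tx slots the backend has responded to:
 * end foreign access on each completed grant, return the grant reference and
 * freelist id, free the skb, then wake the tx queue if slots became free.
 */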
static void xennet_tx_buf_gc(struct netfront_queue *queue)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct sk_buff *skb;
	bool more_to_do;

	BUG_ON(!netif_carrier_ok(queue->info->netdev));

	do {
		prod = queue->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = queue->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				queue->grant_tx_ref[id]) != 0)) {
				pr_alert("%s: warning -- grant still in use by backend domain\n",
					 __func__);
				BUG();
			}
			gnttab_end_foreign_access_ref(
				queue->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&queue->gref_tx_head, queue->grant_tx_ref[id]);
			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
			queue->grant_tx_page[id] = NULL;
			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		queue->tx.rsp_cons = prod;

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
	} while (more_to_do);

	xennet_maybe_wake_tx(queue);
}

struct xennet_gnttab_make_txreq {
	struct netfront_queue *queue;
	struct sk_buff *skb;
	struct page *page;
	struct xen_netif_tx_request *tx; /* Last request */
	unsigned int size;
};

static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;
	unsigned int id;
	struct xen_netif_tx_request *tx;
	grant_ref_t ref;
	/* convenient aliases */
	struct page *page = info->page;
	struct netfront_queue *queue = info->queue;
	struct sk_buff *skb = info->skb;

	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));

	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
					gfn, GNTMAP_readonly);

	queue->tx_skbs[id].skb = skb;
	queue->grant_tx_page[id] = page;
	queue->grant_tx_ref[id] = ref;

	tx->id = id;
	tx->gref = ref;
	tx->offset = offset;
	tx->size = len;
	tx->flags = 0;

	info->tx = tx;
	info->size += tx->size;
}

static struct xen_netif_tx_request *xennet_make_first_txreq(
	struct netfront_queue *queue, struct sk_buff *skb,
	struct page *page, unsigned int offset, unsigned int len)
{
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = skb,
		.page = page,
		.size = 0,
	};

	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);

	return info.tx;
}

static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct xennet_gnttab_make_txreq *info = data;

	info->tx->flags |= XEN_NETTXF_more_data;
	skb_get(info->skb);

	xennet_tx_setup_grant(gfn, offset, len, data);
}

static struct xen_netif_tx_request *xennet_make_txreqs(
	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
	struct sk_buff *skb, struct page *page,
	unsigned int offset, unsigned int len)
{
	struct xennet_gnttab_make_txreq info = {
		.queue = queue,
		.skb = skb,
		.tx = tx,
	};

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (len) {
		info.page = page;
		info.size = 0;

		gnttab_foreach_grant_in_range(page, offset, len,
					      xennet_make_one_txreq,
					      &info);

		page++;
		offset = 0;
		len -= info.size;
	}

	return info.tx;
}

/*
 * Count how many ring slots are required to send this skb. Each frag
 * might be a compound page.
 */
static int xennet_count_skb_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int slots;

	slots = gnttab_count_grant(offset_in_page(skb->data),
				   skb_headlen(skb));

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;

		slots += gnttab_count_grant(offset, size);
	}

	return slots;
}
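/* Descriptive note (added): pick the tx queue for an skb: queue 0 when only
 * one queue exists, otherwise the flow hash modulo the number of queues.
 */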
static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	unsigned int num_queues = dev->real_num_tx_queues;
	u32 hash;
	u16 queue_idx;

	/* First, check if there is only one queue */
	if (num_queues == 1) {
		queue_idx = 0;
	} else {
		hash = skb_get_hash(skb);
		queue_idx = hash % num_queues;
	}

	return queue_idx;
}

#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
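/* Descriptive note (added): transmit path. Maps the skb's linear area and
 * frags to grant references, builds one tx request per grant, appends a GSO
 * extra-info slot when needed, then pushes the requests and kicks the
 * backend. The queue is stopped when too few ring slots remain.
 */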
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
	struct xen_netif_tx_request *tx, *first_tx;
	unsigned int i;
	int notify;
	int slots;
	struct page *page;
	unsigned int offset;
	unsigned int len;
	unsigned long flags;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = dev->real_num_tx_queues;
	u16 queue_index;
	struct sk_buff *nskb;

	/* Drop the packet if no queues are set up */
	if (num_queues < 1)
		goto drop;
	/* Determine which queue to transmit this SKB on */
	queue_index = skb_get_queue_mapping(skb);
	queue = &np->queues[queue_index];

	/* If skb->len is too big for wire format, drop skb and alert
	 * user about misconfiguration.
	 */
	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
		net_alert_ratelimited(
			"xennet: skb->len = %u, too big for wire format\n",
			skb->len);
		goto drop;
	}

	slots = xennet_count_skb_slots(skb);
	if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
				    slots, skb->len);
		if (skb_linearize(skb))
			goto drop;
	}

	page = virt_to_page(skb->data);
	offset = offset_in_page(skb->data);

	/* The first req should be at least ETH_HLEN size or the packet will be
	 * dropped by netback.
	 */
	if (unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
		nskb = skb_copy(skb, GFP_ATOMIC);
		if (!nskb)
			goto drop;
		dev_kfree_skb_any(skb);
		skb = nskb;
		page = virt_to_page(skb->data);
		offset = offset_in_page(skb->data);
	}

	len = skb_headlen(skb);

	spin_lock_irqsave(&queue->tx_lock, flags);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (slots > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irqrestore(&queue->tx_lock, flags);
		goto drop;
	}

	/* First request for the linear area. */
	first_tx = tx = xennet_make_first_txreq(queue, skb,
						page, offset, len);
	offset += tx->size;
	if (offset == PAGE_SIZE) {
		page++;
		offset = 0;
	}
	len -= tx->size;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	/* Optional extra info after the first request. */
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);

		tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
			XEN_NETIF_GSO_TYPE_TCPV6 :
			XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
	}

	/* Requests for the rest of the linear area. */
	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);

	/* Requests for all the frags. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		tx = xennet_make_txreqs(queue, tx, skb,
					skb_frag_page(frag), frag->page_offset,
					skb_frag_size(frag));
	}

	/* First request has the packet length. */
	first_tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);

	u64_stats_update_begin(&tx_stats->syncp);
	tx_stats->bytes += skb->len;
	tx_stats->packets++;
	u64_stats_update_end(&tx_stats->syncp);

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(queue);

	if (!netfront_tx_slot_available(queue))
		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;
	struct netfront_queue *queue;

	netif_tx_stop_all_queues(np->netdev);
	for (i = 0; i < num_queues; ++i) {
		queue = &np->queues[i];
		napi_disable(&queue->napi);
	}
	return 0;
}

static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(queue->rx.req_prod_pvt);

	BUG_ON(queue->rx_skbs[new]);
	queue->rx_skbs[new] = skb;
	queue->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
	queue->rx.req_prod_pvt++;
}
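/* Descriptive note (added): consume the chain of extra-info slots that
 * follows an rx response flagged with XEN_NETRXF_extra_info, copying each
 * valid entry into 'extras' and recycling the rx slots it occupied.
 */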
static int xennet_get_extras(struct netfront_queue *queue,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&queue->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(queue, cons);
		ref = xennet_get_rx_ref(queue, cons);
		xennet_move_rx_slot(queue, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	queue->rx.rsp_cons = cons;
	return err;
}
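/* Descriptive note (added): gather all rx responses belonging to one packet
 * (the head response plus any XEN_NETRXF_more_data slots), ending foreign
 * access on each grant and queueing the corresponding skbs on 'list' for
 * later frag assembly.
 */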
static int xennet_get_responses(struct netfront_queue *queue,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &queue->info->netdev->dev;
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int slots = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(queue, extras, rp);
		cons = queue->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > XEN_PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %u, size: %d\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(queue, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&queue->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		if (cons + slots == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more slots\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
		skb = xennet_get_rx_skb(queue, cons + slots);
		ref = xennet_get_rx_ref(queue, cons + slots);
		slots++;
	}

	if (unlikely(slots > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many slots\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		queue->rx.rsp_cons = cons + slots;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			pr_warn("GSO size must not be zero\n");
		return -EINVAL;
	}

	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
		if (net_ratelimit())
			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type =
		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
		SKB_GSO_TCPV4 :
		SKB_GSO_TCPV6;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
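/* Descriptive note (added): attach the pages of the queued continuation skbs
 * as frags of the head skb, pulling part of the head into the linear area
 * first if the frag array is already full. Returns the updated
 * response-consumer index.
 */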
static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	RING_IDX cons = queue->rx.rsp_cons;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&queue->rx, ++cons);
		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

			BUG_ON(pull_to <= skb_headlen(skb));
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
		}
		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);

		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
				rx->offset, rx->status, PAGE_SIZE);

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);
	}

	return cons;
}

static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		atomic_inc(&np->rx_gso_checksum_fixup);
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
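/* Descriptive note (added): hand the fully assembled skbs on 'rxq' to the
 * network stack via GRO, fixing up checksum state first. Returns the number
 * of packets dropped so the caller can adjust its work_done count.
 */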
static int handle_incoming_queue(struct netfront_queue *queue,
				 struct sk_buff_head *rxq)
{
	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

		if (pull_to > skb_headlen(skb))
			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, queue->info->netdev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue->info->netdev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			queue->info->netdev->stats.rx_errors++;
			continue;
		}

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->packets++;
		rx_stats->bytes += skb->len;
		u64_stats_update_end(&rx_stats->syncp);

		/* Pass it up. */
		napi_gro_receive(&queue->napi, skb);
	}

	return packets_dropped;
}
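/* Descriptive note (added): NAPI poll handler. Consumes up to 'budget' rx
 * responses, rebuilds each packet (head slot, optional GSO extra info,
 * continuation frags), passes the results up the stack, refills the rx ring,
 * and re-arms NAPI if more work remains.
 */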
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
	struct net_device *dev = queue->info->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	int err;

	spin_lock(&queue->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = queue->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = queue->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = queue->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				queue->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;

		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
		skb->data_len = rx->status;
		skb->len += rx->status;

		i = xennet_fill_frags(queue, skb, &tmpq);

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		queue->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(queue, &rxq);

	xennet_alloc_rx_buffers(queue);

	if (work_done < budget) {
		int more_to_do = 0;

		napi_complete_done(napi, work_done);

		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
		if (more_to_do)
			napi_schedule(napi);
	}

	spin_unlock(&queue->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static void xennet_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *tot)
{
	struct netfront_info *np = netdev_priv(dev);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes   += rx_bytes;
		tot->tx_bytes   += tx_bytes;
	}

	tot->rx_errors  = dev->stats.rx_errors;
	tot->tx_dropped = dev->stats.tx_dropped;
}
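/* Descriptive note (added): on disconnect, walk the tx ring state and release
 * every outstanding tx buffer: revoke the grant, return the slot to the
 * freelist, and free the skb.
 */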
static void xennet_release_tx_bufs(struct netfront_queue *queue)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&queue->tx_skbs[i]))
			continue;

		skb = queue->tx_skbs[i].skb;
		get_page(queue->grant_tx_page[i]);
		gnttab_end_foreign_access(queue->grant_tx_ref[i],
					  GNTMAP_readonly,
					  (unsigned long)page_address(queue->grant_tx_page[i]));
		queue->grant_tx_page[i] = NULL;
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_queue *queue)
{
	int id, ref;

	spin_lock_bh(&queue->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		struct sk_buff *skb;
		struct page *page;

		skb = queue->rx_skbs[id];
		if (!skb)
			continue;

		ref = queue->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF)
			continue;

		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);

		/* gnttab_end_foreign_access() needs a page ref until
		 * foreign access is ended (which may be deferred).
		 */
		get_page(page);
		gnttab_end_foreign_access(ref, 0,
					  (unsigned long)page_address(page));
		queue->grant_rx_ref[id] = GRANT_INVALID_REF;

		kfree_skb(skb);
	}

	spin_unlock_bh(&queue->rx_lock);
}

static netdev_features_t xennet_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct netfront_info *np = netdev_priv(dev);

	if (features & NETIF_F_SG &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-sg", 0))
		features &= ~NETIF_F_SG;

	if (features & NETIF_F_IPV6_CSUM &&
	    !xenbus_read_unsigned(np->xbdev->otherend,
				  "feature-ipv6-csum-offload", 0))
		features &= ~NETIF_F_IPV6_CSUM;

	if (features & NETIF_F_TSO &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv4", 0))
		features &= ~NETIF_F_TSO;

	if (features & NETIF_F_TSO6 &&
	    !xenbus_read_unsigned(np->xbdev->otherend, "feature-gso-tcpv6", 0))
		features &= ~NETIF_F_TSO6;

	return features;
}

static int xennet_set_features(struct net_device *dev,
	netdev_features_t features)
{
	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}

static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&queue->tx_lock, flags);
	xennet_tx_buf_gc(queue);
	spin_unlock_irqrestore(&queue->tx_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
{
	struct netfront_queue *queue = dev_id;
	struct net_device *dev = queue->info->netdev;

	if (likely(netif_carrier_ok(dev) &&
		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	xennet_tx_interrupt(irq, dev_id);
	xennet_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void xennet_poll_controller(struct net_device *dev)
{
	/* Poll each queue */
	struct netfront_info *info = netdev_priv(dev);
	unsigned int num_queues = dev->real_num_tx_queues;
	unsigned int i;

	for (i = 0; i < num_queues; ++i)
		xennet_interrupt(0, &info->queues[i]);
}
#endif

static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu      = xennet_change_mtu,
	.ndo_get_stats64     = xennet_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
	.ndo_select_queue    = xennet_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = xennet_poll_controller,
#endif
};

static void xennet_free_netdev(struct net_device *netdev)
{
	struct netfront_info *np = netdev_priv(netdev);

	free_percpu(np->rx_stats);
	free_percpu(np->tx_stats);
	free_netdev(netdev);
}

static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	np = netdev_priv(netdev);
	np->xbdev = dev;

	np->queues = NULL;

	err = -ENOMEM;
	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->rx_stats == NULL)
		goto exit;
	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
	if (np->tx_stats == NULL)
		goto exit;

	netdev->netdev_ops = &xennet_netdev_ops;

	netdev->features   = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			     NETIF_F_GSO_ROBUST;
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_TSO | NETIF_F_TSO6;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	netdev->ethtool_ops = &xennet_ethtool_ops;
	netdev->min_mtu = 0;
	netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE;
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

 exit:
	xennet_free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int netfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);
#ifdef CONFIG_SYSFS
	info->netdev->sysfs_groups[0] = &xennet_dev_group;
#endif
	err = register_netdev(info->netdev);
	if (err) {
		pr_warn("%s: register_netdev err=%d\n", __func__, err);
		goto fail;
	}

	return 0;

 fail:
	xennet_free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}
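/* Descriptive note (added): tear down the connection to the backend: quiesce
 * NAPI, unbind the event channel IRQs, release all outstanding tx/rx buffers
 * and grant references, and revoke access to the shared ring pages.
 */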
static void xennet_disconnect_backend(struct netfront_info *info)
{
	unsigned int i = 0;
	unsigned int num_queues = info->netdev->real_num_tx_queues;

	netif_carrier_off(info->netdev);

	for (i = 0; i < num_queues && info->queues; ++i) {
		struct netfront_queue *queue = &info->queues[i];

		del_timer_sync(&queue->rx_refill_timer);

		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
			unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
			unbind_from_irqhandler(queue->tx_irq, queue);
			unbind_from_irqhandler(queue->rx_irq, queue);
		}
		queue->tx_evtchn = queue->rx_evtchn = 0;
		queue->tx_irq = queue->rx_irq = 0;

		if (netif_running(info->netdev))
			napi_synchronize(&queue->napi);

		xennet_release_tx_bufs(queue);
		xennet_release_rx_bufs(queue);
		gnttab_free_grant_references(queue->gref_tx_head);
		gnttab_free_grant_references(queue->gref_rx_head);

		/* End access and free the pages */
		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);

		queue->tx_ring_ref = GRANT_INVALID_REF;
		queue->rx_ring_ref = GRANT_INVALID_REF;
		queue->tx.sring = NULL;
		queue->rx.sring = NULL;
	}
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}

static int setup_netfront_single(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;

	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_interrupt,
					0, queue->info->netdev->name, queue);
	if (err < 0)
		goto bind_fail;
	queue->rx_evtchn = queue->tx_evtchn;
	queue->rx_irq = queue->tx_irq = err;

	return 0;

bind_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}

static int setup_netfront_split(struct netfront_queue *queue)
{
	int err;

	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
	if (err < 0)
		goto fail;
	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
	if (err < 0)
		goto alloc_rx_evtchn_fail;

	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
		 "%s-tx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
					xennet_tx_interrupt,
					0, queue->tx_irq_name, queue);
	if (err < 0)
		goto bind_tx_fail;
	queue->tx_irq = err;

	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
		 "%s-rx", queue->name);
	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
					xennet_rx_interrupt,
					0, queue->rx_irq_name, queue);
	if (err < 0)
		goto bind_rx_fail;
	queue->rx_irq = err;

	return 0;

bind_rx_fail:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
bind_tx_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
	queue->rx_evtchn = 0;
alloc_rx_evtchn_fail:
	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
	queue->tx_evtchn = 0;
fail:
	return err;
}
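/* Descriptive note (added): allocate and grant the shared tx/rx ring pages,
 * then set up either split or single event channels with the backend,
 * falling back to a single channel if split setup fails.
 */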
  1235. static int setup_netfront(struct xenbus_device *dev,
  1236. struct netfront_queue *queue, unsigned int feature_split_evtchn)
  1237. {
  1238. struct xen_netif_tx_sring *txs;
  1239. struct xen_netif_rx_sring *rxs;
  1240. grant_ref_t gref;
  1241. int err;
  1242. queue->tx_ring_ref = GRANT_INVALID_REF;
  1243. queue->rx_ring_ref = GRANT_INVALID_REF;
  1244. queue->rx.sring = NULL;
  1245. queue->tx.sring = NULL;
  1246. txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
  1247. if (!txs) {
  1248. err = -ENOMEM;
  1249. xenbus_dev_fatal(dev, err, "allocating tx ring page");
  1250. goto fail;
  1251. }
  1252. SHARED_RING_INIT(txs);
  1253. FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
  1254. err = xenbus_grant_ring(dev, txs, 1, &gref);
  1255. if (err < 0)
  1256. goto grant_tx_ring_fail;
  1257. queue->tx_ring_ref = gref;
  1258. rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
  1259. if (!rxs) {
  1260. err = -ENOMEM;
  1261. xenbus_dev_fatal(dev, err, "allocating rx ring page");
  1262. goto alloc_rx_ring_fail;
  1263. }
  1264. SHARED_RING_INIT(rxs);
  1265. FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
  1266. err = xenbus_grant_ring(dev, rxs, 1, &gref);
  1267. if (err < 0)
  1268. goto grant_rx_ring_fail;
  1269. queue->rx_ring_ref = gref;
  1270. if (feature_split_evtchn)
  1271. err = setup_netfront_split(queue);
  1272. /* setup single event channel if
  1273. * a) feature-split-event-channels == 0
  1274. * b) feature-split-event-channels == 1 but failed to setup
  1275. */
  1276. if (!feature_split_evtchn || (feature_split_evtchn && err))
  1277. err = setup_netfront_single(queue);
  1278. if (err)
  1279. goto alloc_evtchn_fail;
  1280. return 0;
  1281. /* If we fail to setup netfront, it is safe to just revoke access to
  1282. * granted pages because backend is not accessing it at this point.
  1283. */
  1284. alloc_evtchn_fail:
  1285. gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
  1286. grant_rx_ring_fail:
  1287. free_page((unsigned long)rxs);
  1288. alloc_rx_ring_fail:
  1289. gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
  1290. grant_tx_ring_fail:
  1291. free_page((unsigned long)txs);
  1292. fail:
  1293. return err;
  1294. }

/* Queue-specific initialisation
 * This used to be done in xennet_create_dev() but must now
 * be run per-queue.
 */
static int xennet_init_queue(struct netfront_queue *queue)
{
	unsigned short i;
	int err = 0;

	spin_lock_init(&queue->tx_lock);
	spin_lock_init(&queue->rx_lock);

	setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
		    (unsigned long)queue);

	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
		 queue->info->netdev->name, queue->id);

	/* Initialise tx_skbs as a free chain containing every entry. */
	queue->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&queue->tx_skbs[i], i+1);
		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
		queue->grant_tx_page[i] = NULL;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		queue->rx_skbs[i] = NULL;
		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
					  &queue->gref_tx_head) < 0) {
		pr_alert("can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}

	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
					  &queue->gref_rx_head) < 0) {
		pr_alert("can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	return 0;

exit_free_tx:
	gnttab_free_grant_references(queue->gref_tx_head);
exit:
	return err;
}
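
/*
 * Publish one queue's ring references and event channel(s) in XenStore.
 * With write_hierarchical == 0 the keys are written directly under the
 * device node (single-queue layout); with write_hierarchical != 0 they
 * are written under a per-queue subdirectory, e.g.
 * "<nodename>/queue-0/tx-ring-ref".
 */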
static int write_queue_xenstore_keys(struct netfront_queue *queue,
				     struct xenbus_transaction *xbt, int write_hierarchical)
{
	/* Write the queue-specific keys into XenStore in the traditional
	 * way for a single queue, or under per-queue subkeys for multiple
	 * queues.
	 */
	struct xenbus_device *dev = queue->info->xbdev;
	int err;
	const char *message;
	char *path;
	size_t pathsize;

	/* Choose the correct place to write the keys */
	if (write_hierarchical) {
		pathsize = strlen(dev->nodename) + 10;
		path = kzalloc(pathsize, GFP_KERNEL);
		if (!path) {
			err = -ENOMEM;
			message = "out of memory while writing ring references";
			goto error;
		}
		snprintf(path, pathsize, "%s/queue-%u",
			 dev->nodename, queue->id);
	} else {
		path = (char *)dev->nodename;
	}

	/* Write ring references */
	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
			    queue->tx_ring_ref);
	if (err) {
		message = "writing tx-ring-ref";
		goto error;
	}

	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
			    queue->rx_ring_ref);
	if (err) {
		message = "writing rx-ring-ref";
		goto error;
	}

	/* Write event channels; taking into account both shared
	 * and split event channel scenarios.
	 */
	if (queue->tx_evtchn == queue->rx_evtchn) {
		/* Shared event channel */
		err = xenbus_printf(*xbt, path,
				    "event-channel", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel";
			goto error;
		}
	} else {
		/* Split event channels */
		err = xenbus_printf(*xbt, path,
				    "event-channel-tx", "%u", queue->tx_evtchn);
		if (err) {
			message = "writing event-channel-tx";
			goto error;
		}

		err = xenbus_printf(*xbt, path,
				    "event-channel-rx", "%u", queue->rx_evtchn);
		if (err) {
			message = "writing event-channel-rx";
			goto error;
		}
	}

	if (write_hierarchical)
		kfree(path);
	return 0;

error:
	if (write_hierarchical)
		kfree(path);
	xenbus_dev_fatal(dev, err, "%s", message);
	return err;
}
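
/*
 * Disable and delete each queue's NAPI instance under the RTNL lock,
 * then free the queue array.
 */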
static void xennet_destroy_queues(struct netfront_info *info)
{
	unsigned int i;

	rtnl_lock();

	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		if (netif_running(info->netdev))
			napi_disable(&queue->napi);
		netif_napi_del(&queue->napi);
	}

	rtnl_unlock();

	kfree(info->queues);
	info->queues = NULL;
}
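
/*
 * Allocate and initialise *num_queues queues and register a NAPI instance
 * for each.  On partial failure *num_queues is reduced to the number of
 * queues actually created; creating no queues at all is an error.
 */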
static int xennet_create_queues(struct netfront_info *info,
				unsigned int *num_queues)
{
	unsigned int i;
	int ret;

	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
			       GFP_KERNEL);
	if (!info->queues)
		return -ENOMEM;

	rtnl_lock();

	for (i = 0; i < *num_queues; i++) {
		struct netfront_queue *queue = &info->queues[i];

		queue->id = i;
		queue->info = info;

		ret = xennet_init_queue(queue);
		if (ret < 0) {
			dev_warn(&info->netdev->dev,
				 "only created %u queues\n", i);
			*num_queues = i;
			break;
		}

		netif_napi_add(queue->info->netdev, &queue->napi,
			       xennet_poll, 64);
		if (netif_running(info->netdev))
			napi_enable(&queue->napi);
	}

	netif_set_real_num_tx_queues(info->netdev, *num_queues);

	rtnl_unlock();

	if (*num_queues == 0) {
		dev_err(&info->netdev->dev, "no queues\n");
		return -EINVAL;
	}
	return 0;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;
	unsigned int feature_split_evtchn;
	unsigned int i = 0;
	unsigned int max_queues = 0;
	struct netfront_queue *queue = NULL;
	unsigned int num_queues = 1;

	info->netdev->irq = 0;

	/* Check if backend supports multiple queues */
	max_queues = xenbus_read_unsigned(info->xbdev->otherend,
					  "multi-queue-max-queues", 1);
	num_queues = min(max_queues, xennet_max_queues);

	/* Check feature-split-event-channels */
	feature_split_evtchn = xenbus_read_unsigned(info->xbdev->otherend,
					"feature-split-event-channels", 0);

	/* Read mac addr. */
	err = xen_net_read_mac(dev, info->netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto out;
	}

	if (info->queues)
		xennet_destroy_queues(info);

	err = xennet_create_queues(info, &num_queues);
	if (err < 0) {
		xenbus_dev_fatal(dev, err, "creating queues");
		kfree(info->queues);
		info->queues = NULL;
		goto out;
	}

	/* Create shared ring, alloc event channel -- for each queue */
	for (i = 0; i < num_queues; ++i) {
		queue = &info->queues[i];
		err = setup_netfront(dev, queue, feature_split_evtchn);
		if (err)
			goto destroy_ring;
	}

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	if (xenbus_exists(XBT_NIL,
			  info->xbdev->otherend, "multi-queue-max-queues")) {
		/* Write the number of queues */
		err = xenbus_printf(xbt, dev->nodename,
				    "multi-queue-num-queues", "%u", num_queues);
		if (err) {
			message = "writing multi-queue-num-queues";
			goto abort_transaction_no_dev_fatal;
		}
	}

	if (num_queues == 1) {
		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
		if (err)
			goto abort_transaction_no_dev_fatal;
	} else {
		/* Write the keys for each queue */
		for (i = 0; i < num_queues; ++i) {
			queue = &info->queues[i];
			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
			if (err)
				goto abort_transaction_no_dev_fatal;
		}
	}

	/* The remaining keys are not queue-specific */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
			   "1");
	if (err) {
		message = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

abort_transaction:
	xenbus_dev_fatal(dev, err, "%s", message);
abort_transaction_no_dev_fatal:
	xenbus_transaction_end(xbt, 1);
destroy_ring:
	xennet_disconnect_backend(info);
	xennet_destroy_queues(info);
out:
	device_unregister(&dev->dev);
	return err;
}
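
/*
 * Bring the interface up against the (re)connected backend: verify that
 * the backend supports rx-copy, renegotiate rings and event channels via
 * talk_to_netback(), then kick every queue so pending tx completions are
 * collected and fresh rx buffers are posted.
 */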
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = 0;
	int err;
	unsigned int j = 0;
	struct netfront_queue *queue = NULL;

	if (!xenbus_read_unsigned(np->xbdev->otherend, "feature-rx-copy", 0)) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	/* talk_to_netback() sets the correct number of queues */
	num_queues = dev->real_num_tx_queues;

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	/*
	 * All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	for (j = 0; j < num_queues; ++j) {
		queue = &np->queues[j];

		notify_remote_via_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			notify_remote_via_irq(queue->rx_irq);

		spin_lock_irq(&queue->tx_lock);
		xennet_tx_buf_gc(queue);
		spin_unlock_irq(&queue->tx_lock);

		spin_lock_bh(&queue->rx_lock);
		xennet_alloc_rx_buffers(queue);
		spin_unlock_bh(&queue->rx_lock);
	}

	return 0;
}

/**
 * Callback received when the backend's state changes.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's CLOSING state -- fallthrough */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}
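
/*
 * ethtool statistics: each entry names a counter in struct netfront_info
 * and records its offset so it can be read generically below.
 */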
static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};

static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}

static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
}

static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,
	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};
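
/*
 * Legacy sysfs attributes: rx buffer sizing is no longer tunable, so
 * rxbuf_min, rxbuf_max and rxbuf_cur all report the fixed
 * NET_RX_RING_SIZE, and writes are validated but otherwise ignored.
 */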
#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
}

static ssize_t store_rxbuf(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	/* rxbuf_min and rxbuf_max are no longer configurable. */

	return len;
}

static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);

static struct attribute *xennet_dev_attrs[] = {
	&dev_attr_rxbuf_min.attr,
	&dev_attr_rxbuf_max.attr,
	&dev_attr_rxbuf_cur.attr,
	NULL
};

static const struct attribute_group xennet_dev_group = {
	.attrs = xennet_dev_attrs
};
#endif /* CONFIG_SYSFS */
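
/*
 * Device teardown: disconnect from the backend, unregister the net
 * device, destroy any remaining queues and free the netdev.
 */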
static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);

	unregister_netdev(info->netdev);

	if (info->queues)
		xennet_destroy_queues(info);
	xennet_free_netdev(info->netdev);

	return 0;
}

static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};

static struct xenbus_driver netfront_driver = {
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
};
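
/*
 * Module initialisation: only load in Xen domains that expose PV network
 * devices, pick a default queue limit if the user did not specify one,
 * and register the xenbus frontend driver.
 */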
static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_nic_devices())
		return -ENODEV;

	pr_info("Initialising Xen virtual ethernet driver\n");

	/* Allow as many queues as there are CPUs, but at most 8
	 * (MAX_QUEUES_DEFAULT), if the user has not specified a value.
	 */
	if (xennet_max_queues == 0)
		xennet_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
					  num_online_cpus());

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");