netback.c

  1. /*
  2. * Back-end of the driver for virtual network devices. This portion of the
  3. * driver exports a 'unified' network-device interface that can be accessed
  4. * by any operating system that implements a compatible front end. A
  5. * reference front-end implementation can be found in:
  6. * drivers/net/xen-netfront.c
  7. *
  8. * Copyright (c) 2002-2005, K A Fraser
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License version 2
  12. * as published by the Free Software Foundation; or, when distributed
  13. * separately from the Linux kernel or incorporated into other
  14. * software packages, subject to the following license:
  15. *
  16. * Permission is hereby granted, free of charge, to any person obtaining a copy
  17. * of this source file (the "Software"), to deal in the Software without
  18. * restriction, including without limitation the rights to use, copy, modify,
  19. * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  20. * and to permit persons to whom the Software is furnished to do so, subject to
  21. * the following conditions:
  22. *
  23. * The above copyright notice and this permission notice shall be included in
  24. * all copies or substantial portions of the Software.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  27. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  28. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  29. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  30. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  31. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  32. * IN THE SOFTWARE.
  33. */
  34. #include "common.h"
  35. #include <linux/kthread.h>
  36. #include <linux/if_vlan.h>
  37. #include <linux/udp.h>
  38. #include <linux/highmem.h>
  39. #include <net/tcp.h>
  40. #include <xen/xen.h>
  41. #include <xen/events.h>
  42. #include <xen/interface/memory.h>
  43. #include <xen/page.h>
  44. #include <asm/xen/hypercall.h>
  45. /* Provide an option to disable split event channels at load time as
  46. * event channels are a limited resource. Split event channels are
  47. * enabled by default.
  48. */
  49. bool separate_tx_rx_irq = true;
  50. module_param(separate_tx_rx_irq, bool, 0644);
  51. /* The time that packets can stay on the guest Rx internal queue
  52. * before they are dropped.
  53. */
  54. unsigned int rx_drain_timeout_msecs = 10000;
  55. module_param(rx_drain_timeout_msecs, uint, 0444);
  56. /* The length of time before the frontend is considered unresponsive
  57. * because it isn't providing Rx slots.
  58. */
  59. unsigned int rx_stall_timeout_msecs = 60000;
  60. module_param(rx_stall_timeout_msecs, uint, 0444);
  61. unsigned int xenvif_max_queues;
  62. module_param_named(max_queues, xenvif_max_queues, uint, 0644);
  63. MODULE_PARM_DESC(max_queues,
  64. "Maximum number of queues per virtual interface");
  65. /*
  66. * This is the maximum number of slots a skb can have. If a guest sends a skb
  67. * which exceeds this limit it is considered malicious.
  68. */
  69. #define FATAL_SKB_SLOTS_DEFAULT 20
  70. static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
  71. module_param(fatal_skb_slots, uint, 0444);
  72. /* The amount to copy out of the first guest Tx slot into the skb's
  73. * linear area. If the first slot has more data, it will be mapped
  74. * and put into the first frag.
  75. *
  76. * This is sized to avoid pulling headers from the frags for most
  77. * TCP/IP packets.
  78. */
  79. #define XEN_NETBACK_TX_COPY_LEN 128
  80. /* This is the maximum number of flows in the hash cache. */
  81. #define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
  82. unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
  83. module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
  84. MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
  85. static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
  86. u8 status);
  87. static void make_tx_response(struct xenvif_queue *queue,
  88. struct xen_netif_tx_request *txp,
  89. unsigned int extra_count,
  90. s8 st);
  91. static void push_tx_responses(struct xenvif_queue *queue);
  92. static inline int tx_work_todo(struct xenvif_queue *queue);
  93. static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
  94. u16 id,
  95. s8 st,
  96. u16 offset,
  97. u16 size,
  98. u16 flags);
  99. static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
  100. u16 idx)
  101. {
  102. return page_to_pfn(queue->mmap_pages[idx]);
  103. }
  104. static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
  105. u16 idx)
  106. {
  107. return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
  108. }
  109. #define callback_param(vif, pending_idx) \
  110. (vif->pending_tx_info[pending_idx].callback_struct)
  111. /* Find the containing VIF's structure from a pointer in the pending_tx_info array
  112. */
  113. static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
  114. {
  115. u16 pending_idx = ubuf->desc;
  116. struct pending_tx_info *temp =
  117. container_of(ubuf, struct pending_tx_info, callback_struct);
  118. return container_of(temp - pending_idx,
  119. struct xenvif_queue,
  120. pending_tx_info[0]);
  121. }
  122. static u16 frag_get_pending_idx(skb_frag_t *frag)
  123. {
  124. return (u16)frag->page_offset;
  125. }
  126. static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
  127. {
  128. frag->page_offset = pending_idx;
  129. }
  130. static inline pending_ring_idx_t pending_index(unsigned i)
  131. {
  132. return i & (MAX_PENDING_REQS-1);
  133. }
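/* Return true if the frontend has posted enough Rx ring slots for the skb at
 * the head of the internal Rx queue (including extra slots for GSO and hash
 * info). If not, re-arm req_event and re-check so that a request posted
 * concurrently by the frontend is not missed.
 */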
  134. static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
  135. {
  136. RING_IDX prod, cons;
  137. struct sk_buff *skb;
  138. int needed;
  139. skb = skb_peek(&queue->rx_queue);
  140. if (!skb)
  141. return false;
  142. needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
  143. if (skb_is_gso(skb))
  144. needed++;
  145. if (skb->sw_hash)
  146. needed++;
  147. do {
  148. prod = queue->rx.sring->req_prod;
  149. cons = queue->rx.req_cons;
  150. if (prod - cons >= needed)
  151. return true;
  152. queue->rx.sring->req_event = prod + 1;
  153. /* Make sure event is visible before we check prod
  154. * again.
  155. */
  156. mb();
  157. } while (queue->rx.sring->req_prod != prod);
  158. return false;
  159. }
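/* Queue an skb on the internal guest Rx queue. If the queued bytes exceed
 * rx_queue_max, stop the corresponding transmit queue to apply backpressure.
 */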
  160. void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
  161. {
  162. unsigned long flags;
  163. spin_lock_irqsave(&queue->rx_queue.lock, flags);
  164. __skb_queue_tail(&queue->rx_queue, skb);
  165. queue->rx_queue_len += skb->len;
  166. if (queue->rx_queue_len > queue->rx_queue_max)
  167. netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
  168. spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
  169. }
  170. static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
  171. {
  172. struct sk_buff *skb;
  173. spin_lock_irq(&queue->rx_queue.lock);
  174. skb = __skb_dequeue(&queue->rx_queue);
  175. if (skb)
  176. queue->rx_queue_len -= skb->len;
  177. spin_unlock_irq(&queue->rx_queue.lock);
  178. return skb;
  179. }
  180. static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
  181. {
  182. spin_lock_irq(&queue->rx_queue.lock);
  183. if (queue->rx_queue_len < queue->rx_queue_max)
  184. netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
  185. spin_unlock_irq(&queue->rx_queue.lock);
  186. }
  187. static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
  188. {
  189. struct sk_buff *skb;
  190. while ((skb = xenvif_rx_dequeue(queue)) != NULL)
  191. kfree_skb(skb);
  192. }
  193. static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
  194. {
  195. struct sk_buff *skb;
  196. for(;;) {
  197. skb = skb_peek(&queue->rx_queue);
  198. if (!skb)
  199. break;
  200. if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
  201. break;
  202. xenvif_rx_dequeue(queue);
  203. kfree_skb(skb);
  204. }
  205. }
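/* Book-keeping for a batch of guest Rx work: producer/consumer indices into
 * the grant-copy and meta arrays, plus the current offset and grant ref
 * within the destination Rx buffer.
 */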
  206. struct netrx_pending_operations {
  207. unsigned copy_prod, copy_cons;
  208. unsigned meta_prod, meta_cons;
  209. struct gnttab_copy *copy;
  210. struct xenvif_rx_meta *meta;
  211. int copy_off;
  212. grant_ref_t copy_gref;
  213. };
  214. static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
  215. struct netrx_pending_operations *npo)
  216. {
  217. struct xenvif_rx_meta *meta;
  218. struct xen_netif_rx_request req;
  219. RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
  220. meta = npo->meta + npo->meta_prod++;
  221. meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
  222. meta->gso_size = 0;
  223. meta->size = 0;
  224. meta->id = req.id;
  225. npo->copy_off = 0;
  226. npo->copy_gref = req.gref;
  227. return meta;
  228. }
  229. struct gop_frag_copy {
  230. struct xenvif_queue *queue;
  231. struct netrx_pending_operations *npo;
  232. struct xenvif_rx_meta *meta;
  233. int head;
  234. int gso_type;
  235. int protocol;
  236. int hash_present;
  237. struct page *page;
  238. };
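/* Emit one grant-copy operation that copies up to *len bytes from the given
 * (possibly foreign) page into the current frontend Rx buffer, advancing to
 * the next Rx request when the current buffer is full. *len is trimmed to
 * what actually fits in the buffer.
 */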
  239. static void xenvif_setup_copy_gop(unsigned long gfn,
  240. unsigned int offset,
  241. unsigned int *len,
  242. struct gop_frag_copy *info)
  243. {
  244. struct gnttab_copy *copy_gop;
  245. struct xen_page_foreign *foreign;
  246. /* Convenient aliases */
  247. struct xenvif_queue *queue = info->queue;
  248. struct netrx_pending_operations *npo = info->npo;
  249. struct page *page = info->page;
  250. BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
  251. if (npo->copy_off == MAX_BUFFER_OFFSET)
  252. info->meta = get_next_rx_buffer(queue, npo);
  253. if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
  254. *len = MAX_BUFFER_OFFSET - npo->copy_off;
  255. copy_gop = npo->copy + npo->copy_prod++;
  256. copy_gop->flags = GNTCOPY_dest_gref;
  257. copy_gop->len = *len;
  258. foreign = xen_page_foreign(page);
  259. if (foreign) {
  260. copy_gop->source.domid = foreign->domid;
  261. copy_gop->source.u.ref = foreign->gref;
  262. copy_gop->flags |= GNTCOPY_source_gref;
  263. } else {
  264. copy_gop->source.domid = DOMID_SELF;
  265. copy_gop->source.u.gmfn = gfn;
  266. }
  267. copy_gop->source.offset = offset;
  268. copy_gop->dest.domid = queue->vif->domid;
  269. copy_gop->dest.offset = npo->copy_off;
  270. copy_gop->dest.u.ref = npo->copy_gref;
  271. npo->copy_off += *len;
  272. info->meta->size += *len;
  273. if (!info->head)
  274. return;
  275. /* Leave a gap for the GSO descriptor. */
  276. if ((1 << info->gso_type) & queue->vif->gso_mask)
  277. queue->rx.req_cons++;
  278. /* Leave a gap for the hash extra segment. */
  279. if (info->hash_present)
  280. queue->rx.req_cons++;
  281. info->head = 0; /* There must be something in this buffer now */
  282. }
  283. static void xenvif_gop_frag_copy_grant(unsigned long gfn,
  284. unsigned offset,
  285. unsigned int len,
  286. void *data)
  287. {
  288. unsigned int bytes;
  289. while (len) {
  290. bytes = len;
  291. xenvif_setup_copy_gop(gfn, offset, &bytes, data);
  292. offset += bytes;
  293. len -= bytes;
  294. }
  295. }
  296. /*
  297. * Set up the grant operations for this fragment. If it's a flipping
  298. * interface, we also set up the unmap request from here.
  299. */
  300. static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
  301. struct netrx_pending_operations *npo,
  302. struct page *page, unsigned long size,
  303. unsigned long offset, int *head)
  304. {
  305. struct gop_frag_copy info = {
  306. .queue = queue,
  307. .npo = npo,
  308. .head = *head,
  309. .gso_type = XEN_NETIF_GSO_TYPE_NONE,
  310. /* xenvif_set_skb_hash() will have either set a s/w
  311. * hash or cleared the hash depending on
  312. * whether the frontend wants a hash for this skb.
  313. */
  314. .hash_present = skb->sw_hash,
  315. };
  316. unsigned long bytes;
  317. if (skb_is_gso(skb)) {
  318. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
  319. info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
  320. else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
  321. info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
  322. }
  323. /* Data must not cross a page boundary. */
  324. BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
  325. info.meta = npo->meta + npo->meta_prod - 1;
  326. /* Skip unused frames from start of page */
  327. page += offset >> PAGE_SHIFT;
  328. offset &= ~PAGE_MASK;
  329. while (size > 0) {
  330. BUG_ON(offset >= PAGE_SIZE);
  331. bytes = PAGE_SIZE - offset;
  332. if (bytes > size)
  333. bytes = size;
  334. info.page = page;
  335. gnttab_foreach_grant_in_range(page, offset, bytes,
  336. xenvif_gop_frag_copy_grant,
  337. &info);
  338. size -= bytes;
  339. offset = 0;
  340. /* Next page */
  341. if (size) {
  342. BUG_ON(!PageCompound(page));
  343. page++;
  344. }
  345. }
  346. *head = info.head;
  347. }
  348. /*
  349. * Prepare an SKB to be transmitted to the frontend.
  350. *
  351. * This function is responsible for allocating grant operations, meta
  352. * structures, etc.
  353. *
  354. * It returns the number of meta structures consumed. The number of
  355. * ring slots used is always equal to the number of meta slots used
  356. * plus the number of GSO descriptors used. Currently, we use either
  357. * zero GSO descriptors (for non-GSO packets) or one descriptor (for
  358. * frontend-side LRO).
  359. */
  360. static int xenvif_gop_skb(struct sk_buff *skb,
  361. struct netrx_pending_operations *npo,
  362. struct xenvif_queue *queue)
  363. {
  364. struct xenvif *vif = netdev_priv(skb->dev);
  365. int nr_frags = skb_shinfo(skb)->nr_frags;
  366. int i;
  367. struct xen_netif_rx_request req;
  368. struct xenvif_rx_meta *meta;
  369. unsigned char *data;
  370. int head = 1;
  371. int old_meta_prod;
  372. int gso_type;
  373. old_meta_prod = npo->meta_prod;
  374. gso_type = XEN_NETIF_GSO_TYPE_NONE;
  375. if (skb_is_gso(skb)) {
  376. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
  377. gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
  378. else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
  379. gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
  380. }
  381. /* Set up a GSO prefix descriptor, if necessary */
  382. if ((1 << gso_type) & vif->gso_prefix_mask) {
  383. RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
  384. meta = npo->meta + npo->meta_prod++;
  385. meta->gso_type = gso_type;
  386. meta->gso_size = skb_shinfo(skb)->gso_size;
  387. meta->size = 0;
  388. meta->id = req.id;
  389. }
  390. RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
  391. meta = npo->meta + npo->meta_prod++;
  392. if ((1 << gso_type) & vif->gso_mask) {
  393. meta->gso_type = gso_type;
  394. meta->gso_size = skb_shinfo(skb)->gso_size;
  395. } else {
  396. meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
  397. meta->gso_size = 0;
  398. }
  399. meta->size = 0;
  400. meta->id = req.id;
  401. npo->copy_off = 0;
  402. npo->copy_gref = req.gref;
  403. data = skb->data;
  404. while (data < skb_tail_pointer(skb)) {
  405. unsigned int offset = offset_in_page(data);
  406. unsigned int len = PAGE_SIZE - offset;
  407. if (data + len > skb_tail_pointer(skb))
  408. len = skb_tail_pointer(skb) - data;
  409. xenvif_gop_frag_copy(queue, skb, npo,
  410. virt_to_page(data), len, offset, &head);
  411. data += len;
  412. }
  413. for (i = 0; i < nr_frags; i++) {
  414. xenvif_gop_frag_copy(queue, skb, npo,
  415. skb_frag_page(&skb_shinfo(skb)->frags[i]),
  416. skb_frag_size(&skb_shinfo(skb)->frags[i]),
  417. skb_shinfo(skb)->frags[i].page_offset,
  418. &head);
  419. }
  420. return npo->meta_prod - old_meta_prod;
  421. }
  422. /*
  423. * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
  424. * used to set up the operations on the top of
  425. * netrx_pending_operations, which have since been done. Check that
  426. * they didn't give any errors and advance over them.
  427. */
  428. static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
  429. struct netrx_pending_operations *npo)
  430. {
  431. struct gnttab_copy *copy_op;
  432. int status = XEN_NETIF_RSP_OKAY;
  433. int i;
  434. for (i = 0; i < nr_meta_slots; i++) {
  435. copy_op = npo->copy + npo->copy_cons++;
  436. if (copy_op->status != GNTST_okay) {
  437. netdev_dbg(vif->dev,
  438. "Bad status %d from copy to DOM%d.\n",
  439. copy_op->status, vif->domid);
  440. status = XEN_NETIF_RSP_ERROR;
  441. }
  442. }
  443. return status;
  444. }
  445. static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
  446. struct xenvif_rx_meta *meta,
  447. int nr_meta_slots)
  448. {
  449. int i;
  450. unsigned long offset;
  451. /* No fragments used */
  452. if (nr_meta_slots <= 1)
  453. return;
  454. nr_meta_slots--;
  455. for (i = 0; i < nr_meta_slots; i++) {
  456. int flags;
  457. if (i == nr_meta_slots - 1)
  458. flags = 0;
  459. else
  460. flags = XEN_NETRXF_more_data;
  461. offset = 0;
  462. make_rx_response(queue, meta[i].id, status, offset,
  463. meta[i].size, flags);
  464. }
  465. }
  466. void xenvif_kick_thread(struct xenvif_queue *queue)
  467. {
  468. wake_up(&queue->wq);
  469. }
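/* Do a batch of guest Rx work: dequeue skbs while ring slots are available,
 * build the grant-copy operations for them, issue the batch copy and then
 * write the Rx responses (including GSO and hash extra info), notifying the
 * frontend via the Rx event channel if required.
 */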
  470. static void xenvif_rx_action(struct xenvif_queue *queue)
  471. {
  472. struct xenvif *vif = queue->vif;
  473. s8 status;
  474. u16 flags;
  475. struct xen_netif_rx_response *resp;
  476. struct sk_buff_head rxq;
  477. struct sk_buff *skb;
  478. LIST_HEAD(notify);
  479. int ret;
  480. unsigned long offset;
  481. bool need_to_notify = false;
  482. struct netrx_pending_operations npo = {
  483. .copy = queue->grant_copy_op,
  484. .meta = queue->meta,
  485. };
  486. skb_queue_head_init(&rxq);
  487. while (xenvif_rx_ring_slots_available(queue)
  488. && (skb = xenvif_rx_dequeue(queue)) != NULL) {
  489. queue->last_rx_time = jiffies;
  490. XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
  491. __skb_queue_tail(&rxq, skb);
  492. }
  493. BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
  494. if (!npo.copy_prod)
  495. goto done;
  496. BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
  497. gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
  498. while ((skb = __skb_dequeue(&rxq)) != NULL) {
  499. struct xen_netif_extra_info *extra = NULL;
  500. if ((1 << queue->meta[npo.meta_cons].gso_type) &
  501. vif->gso_prefix_mask) {
  502. resp = RING_GET_RESPONSE(&queue->rx,
  503. queue->rx.rsp_prod_pvt++);
  504. resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
  505. resp->offset = queue->meta[npo.meta_cons].gso_size;
  506. resp->id = queue->meta[npo.meta_cons].id;
  507. resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
  508. npo.meta_cons++;
  509. XENVIF_RX_CB(skb)->meta_slots_used--;
  510. }
  511. queue->stats.tx_bytes += skb->len;
  512. queue->stats.tx_packets++;
  513. status = xenvif_check_gop(vif,
  514. XENVIF_RX_CB(skb)->meta_slots_used,
  515. &npo);
  516. if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
  517. flags = 0;
  518. else
  519. flags = XEN_NETRXF_more_data;
  520. if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
  521. flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
  522. else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
  523. /* remote but checksummed. */
  524. flags |= XEN_NETRXF_data_validated;
  525. offset = 0;
  526. resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
  527. status, offset,
  528. queue->meta[npo.meta_cons].size,
  529. flags);
  530. if ((1 << queue->meta[npo.meta_cons].gso_type) &
  531. vif->gso_mask) {
  532. extra = (struct xen_netif_extra_info *)
  533. RING_GET_RESPONSE(&queue->rx,
  534. queue->rx.rsp_prod_pvt++);
  535. resp->flags |= XEN_NETRXF_extra_info;
  536. extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
  537. extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
  538. extra->u.gso.pad = 0;
  539. extra->u.gso.features = 0;
  540. extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
  541. extra->flags = 0;
  542. }
  543. if (skb->sw_hash) {
  544. /* Since the skb got here via xenvif_select_queue()
  545. * we know that the hash has been re-calculated
  546. * according to a configuration set by the frontend
  547. * and therefore we know that it is legitimate to
  548. * pass it to the frontend.
  549. */
  550. if (resp->flags & XEN_NETRXF_extra_info)
  551. extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
  552. else
  553. resp->flags |= XEN_NETRXF_extra_info;
  554. extra = (struct xen_netif_extra_info *)
  555. RING_GET_RESPONSE(&queue->rx,
  556. queue->rx.rsp_prod_pvt++);
  557. extra->u.hash.algorithm =
  558. XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
  559. if (skb->l4_hash)
  560. extra->u.hash.type =
  561. skb->protocol == htons(ETH_P_IP) ?
  562. _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
  563. _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
  564. else
  565. extra->u.hash.type =
  566. skb->protocol == htons(ETH_P_IP) ?
  567. _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
  568. _XEN_NETIF_CTRL_HASH_TYPE_IPV6;
  569. *(uint32_t *)extra->u.hash.value =
  570. skb_get_hash_raw(skb);
  571. extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
  572. extra->flags = 0;
  573. }
  574. xenvif_add_frag_responses(queue, status,
  575. queue->meta + npo.meta_cons + 1,
  576. XENVIF_RX_CB(skb)->meta_slots_used);
  577. RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
  578. need_to_notify |= !!ret;
  579. npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
  580. dev_kfree_skb(skb);
  581. }
  582. done:
  583. if (need_to_notify)
  584. notify_remote_via_irq(queue->rx_irq);
  585. }
  586. void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
  587. {
  588. int more_to_do;
  589. RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
  590. if (more_to_do)
  591. napi_schedule(&queue->napi);
  592. }
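/* Replenish the queue's transmit credit, capping it at the larger of the
 * configured credit_bytes and a 128kB burst, and taking care not to wrap.
 */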
  593. static void tx_add_credit(struct xenvif_queue *queue)
  594. {
  595. unsigned long max_burst, max_credit;
  596. /*
  597. * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
  598. * Otherwise the interface can seize up due to insufficient credit.
  599. */
  600. max_burst = max(131072UL, queue->credit_bytes);
  601. /* Take care that adding a new chunk of credit doesn't wrap to zero. */
  602. max_credit = queue->remaining_credit + queue->credit_bytes;
  603. if (max_credit < queue->remaining_credit)
  604. max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
  605. queue->remaining_credit = min(max_credit, max_burst);
  606. }
  607. void xenvif_tx_credit_callback(unsigned long data)
  608. {
  609. struct xenvif_queue *queue = (struct xenvif_queue *)data;
  610. tx_add_credit(queue);
  611. xenvif_napi_schedule_or_enable_events(queue);
  612. }
  613. static void xenvif_tx_err(struct xenvif_queue *queue,
  614. struct xen_netif_tx_request *txp,
  615. unsigned int extra_count, RING_IDX end)
  616. {
  617. RING_IDX cons = queue->tx.req_cons;
  618. unsigned long flags;
  619. do {
  620. spin_lock_irqsave(&queue->response_lock, flags);
  621. make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
  622. push_tx_responses(queue);
  623. spin_unlock_irqrestore(&queue->response_lock, flags);
  624. if (cons == end)
  625. break;
  626. RING_COPY_REQUEST(&queue->tx, cons++, txp);
  627. extra_count = 0; /* only the first frag can have extras */
  628. } while (1);
  629. queue->tx.req_cons = cons;
  630. }
  631. static void xenvif_fatal_tx_err(struct xenvif *vif)
  632. {
  633. netdev_err(vif->dev, "fatal error; disabling device\n");
  634. vif->disabled = true;
  635. /* Disable the vif from queue 0's kthread */
  636. if (vif->queues)
  637. xenvif_kick_thread(&vif->queues[0]);
  638. }
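/* Walk the chain of XEN_NETTXF_more_data slots that make up a packet,
 * validating slot counts, sizes and offsets. Returns the number of frag
 * slots on success; on error the packet is dropped (or, for malformed
 * requests, the vif is disabled) and a negative errno is returned.
 */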
  639. static int xenvif_count_requests(struct xenvif_queue *queue,
  640. struct xen_netif_tx_request *first,
  641. unsigned int extra_count,
  642. struct xen_netif_tx_request *txp,
  643. int work_to_do)
  644. {
  645. RING_IDX cons = queue->tx.req_cons;
  646. int slots = 0;
  647. int drop_err = 0;
  648. int more_data;
  649. if (!(first->flags & XEN_NETTXF_more_data))
  650. return 0;
  651. do {
  652. struct xen_netif_tx_request dropped_tx = { 0 };
  653. if (slots >= work_to_do) {
  654. netdev_err(queue->vif->dev,
  655. "Asked for %d slots but exceeds this limit\n",
  656. work_to_do);
  657. xenvif_fatal_tx_err(queue->vif);
  658. return -ENODATA;
  659. }
  660. /* This guest is really using too many slots and
  661. * is considered malicious.
  662. */
  663. if (unlikely(slots >= fatal_skb_slots)) {
  664. netdev_err(queue->vif->dev,
  665. "Malicious frontend using %d slots, threshold %u\n",
  666. slots, fatal_skb_slots);
  667. xenvif_fatal_tx_err(queue->vif);
  668. return -E2BIG;
  669. }
  670. /* Xen network protocol had implicit dependency on
  671. * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
  672. * the historical MAX_SKB_FRAGS value 18 to honor the
  673. * same behavior as before. Any packet using more than
  674. * 18 slots but less than fatal_skb_slots slots is
  675. * dropped
  676. */
  677. if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
  678. if (net_ratelimit())
  679. netdev_dbg(queue->vif->dev,
  680. "Too many slots (%d) exceeding limit (%d), dropping packet\n",
  681. slots, XEN_NETBK_LEGACY_SLOTS_MAX);
  682. drop_err = -E2BIG;
  683. }
  684. if (drop_err)
  685. txp = &dropped_tx;
  686. RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
  687. /* If the guest submitted a frame >= 64 KiB then
  688. * first->size overflowed and following slots will
  689. * appear to be larger than the frame.
  690. *
  691. * This cannot be a fatal error as there are buggy
  692. * frontends that do this.
  693. *
  694. * Consume all slots and drop the packet.
  695. */
  696. if (!drop_err && txp->size > first->size) {
  697. if (net_ratelimit())
  698. netdev_dbg(queue->vif->dev,
  699. "Invalid tx request, slot size %u > remaining size %u\n",
  700. txp->size, first->size);
  701. drop_err = -EIO;
  702. }
  703. first->size -= txp->size;
  704. slots++;
  705. if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
  706. netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
  707. txp->offset, txp->size);
  708. xenvif_fatal_tx_err(queue->vif);
  709. return -EINVAL;
  710. }
  711. more_data = txp->flags & XEN_NETTXF_more_data;
  712. if (!drop_err)
  713. txp++;
  714. } while (more_data);
  715. if (drop_err) {
  716. xenvif_tx_err(queue, first, extra_count, cons + slots);
  717. return drop_err;
  718. }
  719. return slots;
  720. }
  721. struct xenvif_tx_cb {
  722. u16 pending_idx;
  723. };
  724. #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
  725. static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
  726. u16 pending_idx,
  727. struct xen_netif_tx_request *txp,
  728. unsigned int extra_count,
  729. struct gnttab_map_grant_ref *mop)
  730. {
  731. queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
  732. gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
  733. GNTMAP_host_map | GNTMAP_readonly,
  734. txp->gref, queue->vif->domid);
  735. memcpy(&queue->pending_tx_info[pending_idx].req, txp,
  736. sizeof(*txp));
  737. queue->pending_tx_info[pending_idx].extra_count = extra_count;
  738. }
  739. static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
  740. {
  741. struct sk_buff *skb =
  742. alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
  743. GFP_ATOMIC | __GFP_NOWARN);
  744. if (unlikely(skb == NULL))
  745. return NULL;
  746. /* Packets passed to netif_rx() must have some headroom. */
  747. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  748. /* Initialize it here to avoid later surprises */
  749. skb_shinfo(skb)->destructor_arg = NULL;
  750. return skb;
  751. }
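/* Set up grant-map operations for each frag slot of the packet, allocating a
 * pending index for each one. Any frags beyond MAX_SKB_FRAGS spill over onto
 * the pre-allocated frag_list skb (nskb). Returns the next unused map op.
 */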
  752. static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
  753. struct sk_buff *skb,
  754. struct xen_netif_tx_request *txp,
  755. struct gnttab_map_grant_ref *gop,
  756. unsigned int frag_overflow,
  757. struct sk_buff *nskb)
  758. {
  759. struct skb_shared_info *shinfo = skb_shinfo(skb);
  760. skb_frag_t *frags = shinfo->frags;
  761. u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
  762. int start;
  763. pending_ring_idx_t index;
  764. unsigned int nr_slots;
  765. nr_slots = shinfo->nr_frags;
  766. /* Skip first skb fragment if it is on same page as header fragment. */
  767. start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
  768. for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
  769. shinfo->nr_frags++, txp++, gop++) {
  770. index = pending_index(queue->pending_cons++);
  771. pending_idx = queue->pending_ring[index];
  772. xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
  773. frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
  774. }
  775. if (frag_overflow) {
  776. shinfo = skb_shinfo(nskb);
  777. frags = shinfo->frags;
  778. for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
  779. shinfo->nr_frags++, txp++, gop++) {
  780. index = pending_index(queue->pending_cons++);
  781. pending_idx = queue->pending_ring[index];
  782. xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
  783. gop);
  784. frag_set_pending_idx(&frags[shinfo->nr_frags],
  785. pending_idx);
  786. }
  787. skb_shinfo(skb)->frag_list = nskb;
  788. }
  789. return gop;
  790. }
  791. static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
  792. u16 pending_idx,
  793. grant_handle_t handle)
  794. {
  795. if (unlikely(queue->grant_tx_handle[pending_idx] !=
  796. NETBACK_INVALID_HANDLE)) {
  797. netdev_err(queue->vif->dev,
  798. "Trying to overwrite active handle! pending_idx: 0x%x\n",
  799. pending_idx);
  800. BUG();
  801. }
  802. queue->grant_tx_handle[pending_idx] = handle;
  803. }
  804. static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
  805. u16 pending_idx)
  806. {
  807. if (unlikely(queue->grant_tx_handle[pending_idx] ==
  808. NETBACK_INVALID_HANDLE)) {
  809. netdev_err(queue->vif->dev,
  810. "Trying to unmap invalid handle! pending_idx: 0x%x\n",
  811. pending_idx);
  812. BUG();
  813. }
  814. queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
  815. }
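/* Check the results of the grant copy (header) and grant map (frag)
 * operations for an skb, including any frag_list skb. Failed or now-useless
 * slots are unmapped and/or released with an appropriate response. Returns
 * 0 if all operations succeeded, nonzero otherwise.
 */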
  816. static int xenvif_tx_check_gop(struct xenvif_queue *queue,
  817. struct sk_buff *skb,
  818. struct gnttab_map_grant_ref **gopp_map,
  819. struct gnttab_copy **gopp_copy)
  820. {
  821. struct gnttab_map_grant_ref *gop_map = *gopp_map;
  822. u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
  823. /* This always points to the shinfo of the skb being checked, which
  824. * could be either the first or the one on the frag_list
  825. */
  826. struct skb_shared_info *shinfo = skb_shinfo(skb);
  827. /* If this is non-NULL, we are currently checking the frag_list skb, and
  828. * this points to the shinfo of the first one
  829. */
  830. struct skb_shared_info *first_shinfo = NULL;
  831. int nr_frags = shinfo->nr_frags;
  832. const bool sharedslot = nr_frags &&
  833. frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
  834. int i, err;
  835. /* Check status of header. */
  836. err = (*gopp_copy)->status;
  837. if (unlikely(err)) {
  838. if (net_ratelimit())
  839. netdev_dbg(queue->vif->dev,
  840. "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
  841. (*gopp_copy)->status,
  842. pending_idx,
  843. (*gopp_copy)->source.u.ref);
  844. /* The first frag might still have this slot mapped */
  845. if (!sharedslot)
  846. xenvif_idx_release(queue, pending_idx,
  847. XEN_NETIF_RSP_ERROR);
  848. }
  849. (*gopp_copy)++;
  850. check_frags:
  851. for (i = 0; i < nr_frags; i++, gop_map++) {
  852. int j, newerr;
  853. pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
  854. /* Check error status: if okay then remember grant handle. */
  855. newerr = gop_map->status;
  856. if (likely(!newerr)) {
  857. xenvif_grant_handle_set(queue,
  858. pending_idx,
  859. gop_map->handle);
  860. /* Had a previous error? Invalidate this fragment. */
  861. if (unlikely(err)) {
  862. xenvif_idx_unmap(queue, pending_idx);
  863. /* If the mapping of the first frag was OK, but
  864. * the header's copy failed, and they are
  865. * sharing a slot, send an error
  866. */
  867. if (i == 0 && sharedslot)
  868. xenvif_idx_release(queue, pending_idx,
  869. XEN_NETIF_RSP_ERROR);
  870. else
  871. xenvif_idx_release(queue, pending_idx,
  872. XEN_NETIF_RSP_OKAY);
  873. }
  874. continue;
  875. }
  876. /* Error on this fragment: respond to client with an error. */
  877. if (net_ratelimit())
  878. netdev_dbg(queue->vif->dev,
  879. "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
  880. i,
  881. gop_map->status,
  882. pending_idx,
  883. gop_map->ref);
  884. xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
  885. /* Not the first error? Preceding frags already invalidated. */
  886. if (err)
  887. continue;
  888. /* First error: if the header hasn't shared a slot with the
  889. * first frag, release it as well.
  890. */
  891. if (!sharedslot)
  892. xenvif_idx_release(queue,
  893. XENVIF_TX_CB(skb)->pending_idx,
  894. XEN_NETIF_RSP_OKAY);
  895. /* Invalidate preceding fragments of this skb. */
  896. for (j = 0; j < i; j++) {
  897. pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
  898. xenvif_idx_unmap(queue, pending_idx);
  899. xenvif_idx_release(queue, pending_idx,
  900. XEN_NETIF_RSP_OKAY);
  901. }
  902. /* And if we found the error while checking the frag_list, unmap
  903. * the first skb's frags
  904. */
  905. if (first_shinfo) {
  906. for (j = 0; j < first_shinfo->nr_frags; j++) {
  907. pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
  908. xenvif_idx_unmap(queue, pending_idx);
  909. xenvif_idx_release(queue, pending_idx,
  910. XEN_NETIF_RSP_OKAY);
  911. }
  912. }
  913. /* Remember the error: invalidate all subsequent fragments. */
  914. err = newerr;
  915. }
  916. if (skb_has_frag_list(skb) && !first_shinfo) {
  917. first_shinfo = skb_shinfo(skb);
  918. shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
  919. nr_frags = shinfo->nr_frags;
  920. goto check_frags;
  921. }
  922. *gopp_map = gop_map;
  923. return err;
  924. }
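/* Replace the pending-index placeholders in the skb's frags with the real
 * pages backing the mapped grants, and chain the zerocopy callback contexts
 * so that completion can release every pending slot.
 */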
  925. static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
  926. {
  927. struct skb_shared_info *shinfo = skb_shinfo(skb);
  928. int nr_frags = shinfo->nr_frags;
  929. int i;
  930. u16 prev_pending_idx = INVALID_PENDING_IDX;
  931. for (i = 0; i < nr_frags; i++) {
  932. skb_frag_t *frag = shinfo->frags + i;
  933. struct xen_netif_tx_request *txp;
  934. struct page *page;
  935. u16 pending_idx;
  936. pending_idx = frag_get_pending_idx(frag);
  937. /* If this is not the first frag, chain it to the previous. */
  938. if (prev_pending_idx == INVALID_PENDING_IDX)
  939. skb_shinfo(skb)->destructor_arg =
  940. &callback_param(queue, pending_idx);
  941. else
  942. callback_param(queue, prev_pending_idx).ctx =
  943. &callback_param(queue, pending_idx);
  944. callback_param(queue, pending_idx).ctx = NULL;
  945. prev_pending_idx = pending_idx;
  946. txp = &queue->pending_tx_info[pending_idx].req;
  947. page = virt_to_page(idx_to_kaddr(queue, pending_idx));
  948. __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
  949. skb->len += txp->size;
  950. skb->data_len += txp->size;
  951. skb->truesize += txp->size;
  952. /* Take an extra reference to offset network stack's put_page */
  953. get_page(queue->mmap_pages[pending_idx]);
  954. }
  955. }
  956. static int xenvif_get_extras(struct xenvif_queue *queue,
  957. struct xen_netif_extra_info *extras,
  958. unsigned int *extra_count,
  959. int work_to_do)
  960. {
  961. struct xen_netif_extra_info extra;
  962. RING_IDX cons = queue->tx.req_cons;
  963. do {
  964. if (unlikely(work_to_do-- <= 0)) {
  965. netdev_err(queue->vif->dev, "Missing extra info\n");
  966. xenvif_fatal_tx_err(queue->vif);
  967. return -EBADR;
  968. }
  969. RING_COPY_REQUEST(&queue->tx, cons, &extra);
  970. queue->tx.req_cons = ++cons;
  971. (*extra_count)++;
  972. if (unlikely(!extra.type ||
  973. extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
  974. netdev_err(queue->vif->dev,
  975. "Invalid extra type: %d\n", extra.type);
  976. xenvif_fatal_tx_err(queue->vif);
  977. return -EINVAL;
  978. }
  979. memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
  980. } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
  981. return work_to_do;
  982. }
  983. static int xenvif_set_skb_gso(struct xenvif *vif,
  984. struct sk_buff *skb,
  985. struct xen_netif_extra_info *gso)
  986. {
  987. if (!gso->u.gso.size) {
  988. netdev_err(vif->dev, "GSO size must not be zero.\n");
  989. xenvif_fatal_tx_err(vif);
  990. return -EINVAL;
  991. }
  992. switch (gso->u.gso.type) {
  993. case XEN_NETIF_GSO_TYPE_TCPV4:
  994. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  995. break;
  996. case XEN_NETIF_GSO_TYPE_TCPV6:
  997. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
  998. break;
  999. default:
  1000. netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
  1001. xenvif_fatal_tx_err(vif);
  1002. return -EINVAL;
  1003. }
  1004. skb_shinfo(skb)->gso_size = gso->u.gso.size;
  1005. /* gso_segs will be calculated later */
  1006. return 0;
  1007. }
  1008. static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
  1009. {
  1010. bool recalculate_partial_csum = false;
  1011. /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
  1012. * peers can fail to set NETRXF_csum_blank when sending a GSO
  1013. * frame. In this case force the SKB to CHECKSUM_PARTIAL and
  1014. * recalculate the partial checksum.
  1015. */
  1016. if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
  1017. queue->stats.rx_gso_checksum_fixup++;
  1018. skb->ip_summed = CHECKSUM_PARTIAL;
  1019. recalculate_partial_csum = true;
  1020. }
  1021. /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
  1022. if (skb->ip_summed != CHECKSUM_PARTIAL)
  1023. return 0;
  1024. return skb_checksum_setup(skb, recalculate_partial_csum);
  1025. }
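/* Credit-based rate limiting: replenish credit once the credit window has
 * elapsed, and if the packet is still larger than the remaining credit, arm
 * the credit timer and return true so the caller defers transmission.
 */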
  1026. static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
  1027. {
  1028. u64 now = get_jiffies_64();
  1029. u64 next_credit = queue->credit_window_start +
  1030. msecs_to_jiffies(queue->credit_usec / 1000);
  1031. /* Timer could already be pending in rare cases. */
  1032. if (timer_pending(&queue->credit_timeout))
  1033. return true;
  1034. /* Passed the point where we can replenish credit? */
  1035. if (time_after_eq64(now, next_credit)) {
  1036. queue->credit_window_start = now;
  1037. tx_add_credit(queue);
  1038. }
  1039. /* Still too big to send right now? Set a callback. */
  1040. if (size > queue->remaining_credit) {
  1041. queue->credit_timeout.data =
  1042. (unsigned long)queue;
  1043. mod_timer(&queue->credit_timeout,
  1044. next_credit);
  1045. queue->credit_window_start = next_credit;
  1046. return true;
  1047. }
  1048. return false;
  1049. }
  1050. /* No locking is required in xenvif_mcast_add/del() as they are
  1051. * only ever invoked from NAPI poll. An RCU list is used because
  1052. * xenvif_mcast_match() is called asynchronously, during start_xmit.
  1053. */
  1054. static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
  1055. {
  1056. struct xenvif_mcast_addr *mcast;
  1057. if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
  1058. if (net_ratelimit())
  1059. netdev_err(vif->dev,
  1060. "Too many multicast addresses\n");
  1061. return -ENOSPC;
  1062. }
  1063. mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
  1064. if (!mcast)
  1065. return -ENOMEM;
  1066. ether_addr_copy(mcast->addr, addr);
  1067. list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
  1068. vif->fe_mcast_count++;
  1069. return 0;
  1070. }
  1071. static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
  1072. {
  1073. struct xenvif_mcast_addr *mcast;
  1074. list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
  1075. if (ether_addr_equal(addr, mcast->addr)) {
  1076. --vif->fe_mcast_count;
  1077. list_del_rcu(&mcast->entry);
  1078. kfree_rcu(mcast, rcu);
  1079. break;
  1080. }
  1081. }
  1082. }
  1083. bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
  1084. {
  1085. struct xenvif_mcast_addr *mcast;
  1086. rcu_read_lock();
  1087. list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
  1088. if (ether_addr_equal(addr, mcast->addr)) {
  1089. rcu_read_unlock();
  1090. return true;
  1091. }
  1092. }
  1093. rcu_read_unlock();
  1094. return false;
  1095. }
  1096. void xenvif_mcast_addr_list_free(struct xenvif *vif)
  1097. {
  1098. /* No need for locking or RCU here. NAPI poll and TX queue
  1099. * are stopped.
  1100. */
  1101. while (!list_empty(&vif->fe_mcast_addr)) {
  1102. struct xenvif_mcast_addr *mcast;
  1103. mcast = list_first_entry(&vif->fe_mcast_addr,
  1104. struct xenvif_mcast_addr,
  1105. entry);
  1106. --vif->fe_mcast_count;
  1107. list_del(&mcast->entry);
  1108. kfree(mcast);
  1109. }
  1110. }
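/* First half of the Tx path: consume requests from the shared Tx ring up to
 * the NAPI budget, applying credit scheduling and handling extra info
 * (multicast add/del, GSO, hash). For each packet a grant copy is set up for
 * the linear header and grant maps for the remaining frag slots, and the skb
 * is queued for xenvif_tx_submit().
 */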
  1111. static void xenvif_tx_build_gops(struct xenvif_queue *queue,
  1112. int budget,
  1113. unsigned *copy_ops,
  1114. unsigned *map_ops)
  1115. {
  1116. struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
  1117. struct sk_buff *skb, *nskb;
  1118. int ret;
  1119. unsigned int frag_overflow;
  1120. while (skb_queue_len(&queue->tx_queue) < budget) {
  1121. struct xen_netif_tx_request txreq;
  1122. struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
  1123. struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
  1124. unsigned int extra_count;
  1125. u16 pending_idx;
  1126. RING_IDX idx;
  1127. int work_to_do;
  1128. unsigned int data_len;
  1129. pending_ring_idx_t index;
  1130. if (queue->tx.sring->req_prod - queue->tx.req_cons >
  1131. XEN_NETIF_TX_RING_SIZE) {
  1132. netdev_err(queue->vif->dev,
  1133. "Impossible number of requests. "
  1134. "req_prod %d, req_cons %d, size %ld\n",
  1135. queue->tx.sring->req_prod, queue->tx.req_cons,
  1136. XEN_NETIF_TX_RING_SIZE);
  1137. xenvif_fatal_tx_err(queue->vif);
  1138. break;
  1139. }
  1140. work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
  1141. if (!work_to_do)
  1142. break;
  1143. idx = queue->tx.req_cons;
  1144. rmb(); /* Ensure that we see the request before we copy it. */
  1145. RING_COPY_REQUEST(&queue->tx, idx, &txreq);
  1146. /* Credit-based scheduling. */
  1147. if (txreq.size > queue->remaining_credit &&
  1148. tx_credit_exceeded(queue, txreq.size))
  1149. break;
  1150. queue->remaining_credit -= txreq.size;
  1151. work_to_do--;
  1152. queue->tx.req_cons = ++idx;
  1153. memset(extras, 0, sizeof(extras));
  1154. extra_count = 0;
  1155. if (txreq.flags & XEN_NETTXF_extra_info) {
  1156. work_to_do = xenvif_get_extras(queue, extras,
  1157. &extra_count,
  1158. work_to_do);
  1159. idx = queue->tx.req_cons;
  1160. if (unlikely(work_to_do < 0))
  1161. break;
  1162. }
  1163. if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
  1164. struct xen_netif_extra_info *extra;
  1165. extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
  1166. ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
  1167. make_tx_response(queue, &txreq, extra_count,
  1168. (ret == 0) ?
  1169. XEN_NETIF_RSP_OKAY :
  1170. XEN_NETIF_RSP_ERROR);
  1171. push_tx_responses(queue);
  1172. continue;
  1173. }
  1174. if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
  1175. struct xen_netif_extra_info *extra;
  1176. extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
  1177. xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
  1178. make_tx_response(queue, &txreq, extra_count,
  1179. XEN_NETIF_RSP_OKAY);
  1180. push_tx_responses(queue);
  1181. continue;
  1182. }
  1183. ret = xenvif_count_requests(queue, &txreq, extra_count,
  1184. txfrags, work_to_do);
  1185. if (unlikely(ret < 0))
  1186. break;
  1187. idx += ret;
  1188. if (unlikely(txreq.size < ETH_HLEN)) {
  1189. netdev_dbg(queue->vif->dev,
  1190. "Bad packet size: %d\n", txreq.size);
  1191. xenvif_tx_err(queue, &txreq, extra_count, idx);
  1192. break;
  1193. }
  1194. /* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev,
				   "txreq.offset: %u, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size)
			skb_shinfo(skb)->nr_frags++;
		/* At this point shinfo->nr_frags is in fact the number of
		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
		 */
		frag_overflow = 0;
		nskb = NULL;
		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
			nskb = xenvif_alloc_skb(0);
			if (unlikely(nskb == NULL)) {
				kfree_skb(skb);
				xenvif_tx_err(queue, &txreq, extra_count, idx);
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Can't allocate the frag_list skb.\n");
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				kfree_skb(nskb);
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
			struct xen_netif_extra_info *extra;
			enum pkt_hash_types type = PKT_HASH_TYPE_NONE;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

			switch (extra->u.hash.type) {
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
				type = PKT_HASH_TYPE_L3;
				break;

			case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
			case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
				type = PKT_HASH_TYPE_L4;
				break;

			default:
				break;
			}

			if (type != PKT_HASH_TYPE_NONE)
				skb_set_hash(skb,
					     *(u32 *)extra->u.hash.value,
					     type);
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_gfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data) & ~XEN_PAGE_MASK;

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		if (data_len < txreq.size) {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
						extra_count, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req,
			       &txreq, sizeof(txreq));
			queue->pending_tx_info[pending_idx].extra_count =
				extra_count;
		}

		queue->pending_cons++;

		gop = xenvif_get_requests(queue, skb, txfrags, gop,
					  frag_overflow, nskb);

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}

/* Consolidate an skb with a frag_list into a brand new one with local pages
 * on its frags. Returns 0 on success, or -ENOMEM if new pages can't be
 * allocated.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);

	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;
			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}

	/* Copied all the bits from the frag list -- free it. */
	skb_frag_list_init(skb);
	xenvif_skb_zerocopy_prepare(queue, nskb);
	kfree_skb(nskb);

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}

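/* Pass completed packets up the stack.  For each skb dequeued from
 * tx_queue: verify the grant copy/map results via xenvif_tx_check_gop(),
 * fix up checksum and GSO metadata, consolidate any frag_list, and hand
 * the packet to the stack with netif_receive_skb().  Returns the number
 * of packets processed.
 */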
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
		}

		skb->dev      = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}

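/* Completion callback for zerocopy (foreign-mapped) skbs.  Walks the chain
 * of ubuf_info contexts, queues each pending index on the dealloc ring for
 * the dealloc thread, and accounts zerocopy success/failure.  Runs with
 * callback_lock held to serialise concurrent callbacks.
 */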
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}

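/* Drain the dealloc ring: batch grant-unmap operations for all slots queued
 * by xenvif_zerocopy_callback(), unmap them in one call and then release
 * the corresponding pending indices back to the frontend.
 */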
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}
	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}

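/* Top-level Tx work: build the grant copy (and, if needed, map) batches
 * from the frontend's requests, issue them, then submit the resulting skbs
 * to the stack.
 */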
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}

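/* Give a completed slot back to the pending ring and queue a Tx response
 * for it, all under the response lock.
 */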
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req,
			 pending_tx_info->extra_count, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}

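/* Write a Tx response for @txp onto the shared ring; any extra-info slots
 * that followed the request are acknowledged with XEN_NETIF_RSP_NULL.
 */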
static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	while (extra_count-- != 0)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}

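/* Make queued Tx responses visible to the frontend and raise the Tx
 * interrupt if the frontend requested notification.
 */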
static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}

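/* Fill in a single Rx response slot; a negative status overrides the size
 * in the status field.
 */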
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags)
{
	RING_IDX i = queue->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&queue->rx, i);
	resp->offset = offset;
	resp->flags = flags;
	resp->id = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	queue->rx.rsp_prod_pvt = ++i;

	return resp;
}

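/* Synchronously unmap the grant behind a single pending index.  A failure
 * here means the slot state is inconsistent, so the details are logged and
 * we BUG().
 */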
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

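/* Map/unmap the frontend's shared Tx and Rx data rings into backend
 * address space.
 */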
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}

int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_data_rings(queue);
	return err;
}

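/* Guest Rx stall handling: the carrier is turned off when the first queue
 * stalls and turned back on only once every stalled queue has become ready
 * again (vif->stalled_queues counts them under vif->lock).
 */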
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled && prod - cons < 1
		&& time_after(jiffies,
			      queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled && prod - cons >= 1;
}

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return xenvif_rx_ring_slots_available(queue)
		|| (queue->vif->stall_timeout &&
		    (xenvif_rx_queue_stalled(queue)
		     || xenvif_rx_queue_ready(queue)))
		|| kthread_should_stop()
		|| queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}

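/* Per-queue guest Rx kernel thread: waits for work, disables a frontend
 * that has been flagged as rogue (from queue 0 only), pushes queued packets
 * to the guest, applies the stall-detection heuristics above and drops
 * packets that have sat on the Rx queue for too long.
 */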
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out frontend sends malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains.  These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		xenvif_rx_queue_maybe_wake(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}

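/* Kernel thread that unmaps grants for completed zerocopy transmissions.
 * It keeps running until every inflight packet has been deallocated, and
 * does a final pass on exit to unmap anything still outstanding.
 */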
int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}

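/* Control ring handling: make_ctrl_response() writes a response for a
 * single control request onto the ring, and push_ctrl_response() makes it
 * visible to the frontend, notifying it if requested.
 */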
static void make_ctrl_response(struct xenvif *vif,
			       const struct xen_netif_ctrl_request *req,
			       u32 status, u32 data)
{
	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
	struct xen_netif_ctrl_response rsp = {
		.id = req->id,
		.type = req->type,
		.status = status,
		.data = data,
	};

	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
	vif->ctrl.rsp_prod_pvt = ++idx;
}

static void push_ctrl_response(struct xenvif *vif)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
	if (notify)
		notify_remote_via_irq(vif->ctrl_irq);
}

static void process_ctrl_request(struct xenvif *vif,
				 const struct xen_netif_ctrl_request *req)
{
	u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
	u32 data = 0;

	switch (req->type) {
	case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
		status = xenvif_set_hash_alg(vif, req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
		status = xenvif_get_hash_flags(vif, &data);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
		status = xenvif_set_hash_flags(vif, req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
		status = xenvif_set_hash_key(vif, req->data[0],
					     req->data[1]);
		break;

	case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
		status = XEN_NETIF_CTRL_STATUS_SUCCESS;
		data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
		status = xenvif_set_hash_mapping_size(vif,
						      req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
		status = xenvif_set_hash_mapping(vif, req->data[0],
						 req->data[1],
						 req->data[2]);
		break;

	default:
		break;
	}

	make_ctrl_response(vif, req, status, data);
	push_ctrl_response(vif);
}

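/* Consume and process every outstanding control-ring request, then re-arm
 * req_event so the frontend's next request raises an interrupt.
 */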
static void xenvif_ctrl_action(struct xenvif *vif)
{
	for (;;) {
		RING_IDX req_prod, req_cons;

		req_prod = vif->ctrl.sring->req_prod;
		req_cons = vif->ctrl.req_cons;

		/* Make sure we can see requests before we process them. */
		rmb();

		if (req_cons == req_prod)
			break;

		while (req_cons != req_prod) {
			struct xen_netif_ctrl_request req;

			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
			req_cons++;

			process_ctrl_request(vif, &req);
		}

		vif->ctrl.req_cons = req_cons;
		vif->ctrl.sring->req_event = req_cons + 1;
	}
}

static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
		return 1;

	return 0;
}

int xenvif_ctrl_kthread(void *data)
{
	struct xenvif *vif = data;

	for (;;) {
		wait_event_interruptible(vif->ctrl_wq,
					 xenvif_ctrl_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		while (xenvif_ctrl_work_todo(vif))
			xenvif_ctrl_action(vif);

		cond_resched();
	}

	return 0;
}

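/* Module init: bail out unless running in a Xen domain, default the number
 * of queues to the number of online CPUs, clamp fatal_skb_slots to at least
 * XEN_NETBK_LEGACY_SLOTS_MAX and register with xenbus (plus an optional
 * debugfs directory).
 */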
static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs if user has not
	 * specified a value.
	 */
	if (xenvif_max_queues == 0)
		xenvif_max_queues = num_online_cpus();

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		pr_warn("Init of debugfs returned %ld!\n",
			PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
		debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}

module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");