netback.c

/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
/* Provide an option to disable split event channels at load time, as
 * event channels are a limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = 1;
module_param(separate_tx_rx_irq, bool, 0644);

/* When the guest ring fills up, qdisc queues the packets for us, but we have
 * to time them out, otherwise other guests' packets can get stuck there.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);
unsigned int rx_drain_timeout_jiffies;

/*
 * This is the maximum number of slots a skb can have. If a guest sends a
 * skb which exceeds this limit, it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);
static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
                               u8 status);

static void make_tx_response(struct xenvif *vif,
                             struct xen_netif_tx_request *txp,
                             s8 st);

static inline int tx_work_todo(struct xenvif *vif);
static inline int rx_work_todo(struct xenvif *vif);

static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
                                                      u16 id,
                                                      s8 st,
                                                      u16 offset,
                                                      u16 size,
                                                      u16 flags);

static inline unsigned long idx_to_pfn(struct xenvif *vif,
                                       u16 idx)
{
    return page_to_pfn(vif->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif *vif,
                                         u16 idx)
{
    return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}

#define callback_param(vif, pending_idx) \
    (vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in the pending_tx_info
 * array.
 */
static inline struct xenvif *ubuf_to_vif(const struct ubuf_info *ubuf)
{
    u16 pending_idx = ubuf->desc;
    struct pending_tx_info *temp =
        container_of(ubuf, struct pending_tx_info, callback_struct);
    return container_of(temp - pending_idx,
                        struct xenvif,
                        pending_tx_info[0]);
}
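
/* The container_of() pair above works because callback_struct is
 * embedded in pending_tx_info[pending_idx]: stepping the element
 * pointer back by pending_idx entries lands on pending_tx_info[0],
 * whose enclosing structure is the struct xenvif itself.
 */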
/* This is a minimum size for the linear area to avoid lots of
 * calls to __pskb_pull_tail() as we set up checksum offsets. The
 * value 128 was chosen as it covers all IPv4 and most likely
 * IPv6 headers.
 */
#define PKT_PROT_LEN 128

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
    return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
    frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
    return i & (MAX_PENDING_REQS-1);
}
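
/* Note: the mask above implements "i % MAX_PENDING_REQS" and is only
 * correct because MAX_PENDING_REQS is a power of two. For example,
 * assuming MAX_PENDING_REQS == 256, pending_index(260) == 260 & 255 == 4.
 */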
bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{
    RING_IDX prod, cons;

    do {
        prod = vif->rx.sring->req_prod;
        cons = vif->rx.req_cons;

        if (prod - cons >= needed)
            return true;

        vif->rx.sring->req_event = prod + 1;

        /* Make sure event is visible before we check prod
         * again.
         */
        mb();
    } while (vif->rx.sring->req_prod != prod);

    return false;
}
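
/* The loop above is the usual lock-free producer check: we arm
 * req_event at prod + 1 so the frontend will notify us when it posts
 * more requests, then re-read req_prod to close the race where
 * requests arrived between the first read and arming the event.
 */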
/*
 * Returns true if we should start a new receive buffer instead of
 * adding 'size' bytes to a buffer which currently contains 'offset'
 * bytes.
 */
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
    /* simple case: we have completely filled the current buffer. */
    if (offset == MAX_BUFFER_OFFSET)
        return true;

    /*
     * complex case: start a fresh buffer if the current frag
     * would overflow the current buffer but only if:
     *     (i)   this frag would fit completely in the next buffer
     * and (ii)  there is already some data in the current buffer
     * and (iii) this is not the head buffer.
     *
     * Where:
     * - (i) stops us splitting a frag into two copies
     *   unless the frag is too large for a single buffer.
     * - (ii) stops us from leaving a buffer pointlessly empty.
     * - (iii) stops us leaving the first buffer
     *   empty. Strictly speaking this is already covered
     *   by (ii) but is explicitly checked because
     *   netfront relies on the first buffer being
     *   non-empty and can crash otherwise.
     *
     * This means we will effectively linearise small
     * frags but do not needlessly split large buffers
     * into multiple copies, and tend to give large frags their
     * own buffers as before.
     */
    BUG_ON(size > MAX_BUFFER_OFFSET);
    if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head)
        return true;

    return false;
}
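
/* Worked example, assuming MAX_BUFFER_OFFSET == PAGE_SIZE == 4096:
 * with offset == 2048, size == 3000 and head == 0, the frag would
 * overflow the current buffer (2048 + 3000 > 4096), it fits in a fresh
 * buffer, the buffer is non-empty and this is not the head, so we start
 * a new buffer rather than splitting the frag across two copies.
 */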
struct netrx_pending_operations {
    unsigned copy_prod, copy_cons;
    unsigned meta_prod, meta_cons;
    struct gnttab_copy *copy;
    struct xenvif_rx_meta *meta;
    int copy_off;
    grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
                                                 struct netrx_pending_operations *npo)
{
    struct xenvif_rx_meta *meta;
    struct xen_netif_rx_request *req;

    req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);

    meta = npo->meta + npo->meta_prod++;
    meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
    meta->gso_size = 0;
    meta->size = 0;
    meta->id = req->id;

    npo->copy_off = 0;
    npo->copy_gref = req->gref;

    return meta;
}
/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
                                 struct netrx_pending_operations *npo,
                                 struct page *page, unsigned long size,
                                 unsigned long offset, int *head,
                                 struct xenvif *foreign_vif,
                                 grant_ref_t foreign_gref)
{
    struct gnttab_copy *copy_gop;
    struct xenvif_rx_meta *meta;
    unsigned long bytes;
    int gso_type = XEN_NETIF_GSO_TYPE_NONE;

    /* Data must not cross a page boundary. */
    BUG_ON(size + offset > PAGE_SIZE << compound_order(page));

    meta = npo->meta + npo->meta_prod - 1;

    /* Skip unused frames from start of page */
    page += offset >> PAGE_SHIFT;
    offset &= ~PAGE_MASK;

    while (size > 0) {
        BUG_ON(offset >= PAGE_SIZE);
        BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

        bytes = PAGE_SIZE - offset;

        if (bytes > size)
            bytes = size;

        if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
            /*
             * Netfront requires there to be some data in the head
             * buffer.
             */
            BUG_ON(*head);

            meta = get_next_rx_buffer(vif, npo);
        }

        if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
            bytes = MAX_BUFFER_OFFSET - npo->copy_off;

        copy_gop = npo->copy + npo->copy_prod++;
        copy_gop->flags = GNTCOPY_dest_gref;
        copy_gop->len = bytes;

        if (foreign_vif) {
            copy_gop->source.domid = foreign_vif->domid;
            copy_gop->source.u.ref = foreign_gref;
            copy_gop->flags |= GNTCOPY_source_gref;
        } else {
            copy_gop->source.domid = DOMID_SELF;
            copy_gop->source.u.gmfn =
                virt_to_mfn(page_address(page));
        }
        copy_gop->source.offset = offset;

        copy_gop->dest.domid = vif->domid;
        copy_gop->dest.offset = npo->copy_off;
        copy_gop->dest.u.ref = npo->copy_gref;

        npo->copy_off += bytes;
        meta->size += bytes;

        offset += bytes;
        size -= bytes;

        /* Next frame */
        if (offset == PAGE_SIZE && size) {
            BUG_ON(!PageCompound(page));
            page++;
            offset = 0;
        }

        /* Leave a gap for the GSO descriptor. */
        if (skb_is_gso(skb)) {
            if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
                gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
            else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
                gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
        }

        if (*head && ((1 << gso_type) & vif->gso_mask))
            vif->rx.req_cons++;

        *head = 0; /* There must be something in this buffer now. */
    }
}
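
/* The req_cons++ under "Leave a gap for the GSO descriptor" consumes
 * one extra ring request without producing a meta slot for it: that
 * slot is later filled with the xen_netif_extra_info GSO descriptor in
 * xenvif_rx_action(), which is why ring slots used can exceed meta
 * slots used by one for GSO packets.
 */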
/*
 * Find the grant ref for a given frag in a chain of struct ubuf_info's
 * skb: the skb itself
 * i: the frag's number
 * ubuf: a pointer to an element in the chain. It should not be NULL
 *
 * Returns a pointer to the element in the chain where the page was found. If
 * not found, returns NULL.
 * See the definition of callback_struct in common.h for more details about
 * the chain.
 */
static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
                                                const int i,
                                                const struct ubuf_info *ubuf)
{
    struct xenvif *foreign_vif = ubuf_to_vif(ubuf);

    do {
        u16 pending_idx = ubuf->desc;

        if (skb_shinfo(skb)->frags[i].page.p ==
            foreign_vif->mmap_pages[pending_idx])
            break;
        ubuf = (struct ubuf_info *) ubuf->ctx;
    } while (ubuf);

    return ubuf;
}
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
                          struct netrx_pending_operations *npo)
{
    struct xenvif *vif = netdev_priv(skb->dev);
    int nr_frags = skb_shinfo(skb)->nr_frags;
    int i;
    struct xen_netif_rx_request *req;
    struct xenvif_rx_meta *meta;
    unsigned char *data;
    int head = 1;
    int old_meta_prod;
    int gso_type;
    const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
    const struct ubuf_info *const head_ubuf = ubuf;

    old_meta_prod = npo->meta_prod;

    gso_type = XEN_NETIF_GSO_TYPE_NONE;
    if (skb_is_gso(skb)) {
        if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
            gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
        else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
            gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
    }

    /* Set up a GSO prefix descriptor, if necessary */
    if ((1 << gso_type) & vif->gso_prefix_mask) {
        req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
        meta = npo->meta + npo->meta_prod++;
        meta->gso_type = gso_type;
        meta->gso_size = skb_shinfo(skb)->gso_size;
        meta->size = 0;
        meta->id = req->id;
    }

    req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
    meta = npo->meta + npo->meta_prod++;

    if ((1 << gso_type) & vif->gso_mask) {
        meta->gso_type = gso_type;
        meta->gso_size = skb_shinfo(skb)->gso_size;
    } else {
        meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
        meta->gso_size = 0;
    }

    meta->size = 0;
    meta->id = req->id;
    npo->copy_off = 0;
    npo->copy_gref = req->gref;

    data = skb->data;
    while (data < skb_tail_pointer(skb)) {
        unsigned int offset = offset_in_page(data);
        unsigned int len = PAGE_SIZE - offset;

        if (data + len > skb_tail_pointer(skb))
            len = skb_tail_pointer(skb) - data;

        xenvif_gop_frag_copy(vif, skb, npo,
                             virt_to_page(data), len, offset, &head,
                             NULL,
                             0);
        data += len;
    }

    for (i = 0; i < nr_frags; i++) {
        /* This variable also signals whether foreign_gref has a real
         * value or not.
         */
        struct xenvif *foreign_vif = NULL;
        grant_ref_t foreign_gref;

        if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
            (ubuf->callback == &xenvif_zerocopy_callback)) {
            const struct ubuf_info *const startpoint = ubuf;

            /* Ideally ubuf points to the chain element which
             * belongs to this frag. Or if frags were removed from
             * the beginning, then shortly before it.
             */
            ubuf = xenvif_find_gref(skb, i, ubuf);

            /* Try again from the beginning of the list, if we
             * haven't tried from there. This only makes sense in
             * the unlikely event of reordering the original frags.
             * For injected local pages it's an unnecessary second
             * run.
             */
            if (unlikely(!ubuf) && startpoint != head_ubuf)
                ubuf = xenvif_find_gref(skb, i, head_ubuf);

            if (likely(ubuf)) {
                u16 pending_idx = ubuf->desc;

                foreign_vif = ubuf_to_vif(ubuf);
                foreign_gref = foreign_vif->pending_tx_info[pending_idx].req.gref;
                /* Just a safety measure. If this was the last
                 * element on the list, the for loop will
                 * iterate again if a local page was added to
                 * the end. Using head_ubuf here prevents the
                 * second search on the chain. Or the original
                 * frags changed order, but that's less likely.
                 * In any case, ubuf shouldn't be NULL.
                 */
                ubuf = ubuf->ctx ?
                       (struct ubuf_info *) ubuf->ctx :
                       head_ubuf;
            } else
                /* This frag was a local page, added to the
                 * array after the skb left netback.
                 */
                ubuf = head_ubuf;
        }
        xenvif_gop_frag_copy(vif, skb, npo,
                             skb_frag_page(&skb_shinfo(skb)->frags[i]),
                             skb_frag_size(&skb_shinfo(skb)->frags[i]),
                             skb_shinfo(skb)->frags[i].page_offset,
                             &head,
                             foreign_vif,
                             foreign_vif ? foreign_gref : UINT_MAX);
    }

    return npo->meta_prod - old_meta_prod;
}
/*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
                            struct netrx_pending_operations *npo)
{
    struct gnttab_copy *copy_op;
    int status = XEN_NETIF_RSP_OKAY;
    int i;

    for (i = 0; i < nr_meta_slots; i++) {
        copy_op = npo->copy + npo->copy_cons++;
        if (copy_op->status != GNTST_okay) {
            netdev_dbg(vif->dev,
                       "Bad status %d from copy to DOM%d.\n",
                       copy_op->status, vif->domid);
            status = XEN_NETIF_RSP_ERROR;
        }
    }

    return status;
}
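
/* The status computed here is aggregated: if any checked copy op
 * failed, the caller applies XEN_NETIF_RSP_ERROR to every response it
 * generates for this skb, not just to the slot whose copy failed.
 */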
static void xenvif_add_frag_responses(struct xenvif *vif, int status,
                                      struct xenvif_rx_meta *meta,
                                      int nr_meta_slots)
{
    int i;
    unsigned long offset;

    /* No fragments used */
    if (nr_meta_slots <= 1)
        return;

    nr_meta_slots--;

    for (i = 0; i < nr_meta_slots; i++) {
        int flags;
        if (i == nr_meta_slots - 1)
            flags = 0;
        else
            flags = XEN_NETRXF_more_data;

        offset = 0;
        make_rx_response(vif, meta[i].id, status, offset,
                         meta[i].size, flags);
    }
}

struct xenvif_rx_cb {
    int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)

void xenvif_kick_thread(struct xenvif *vif)
{
    wake_up(&vif->wq);
}
static void xenvif_rx_action(struct xenvif *vif)
{
    s8 status;
    u16 flags;
    struct xen_netif_rx_response *resp;
    struct sk_buff_head rxq;
    struct sk_buff *skb;
    LIST_HEAD(notify);
    int ret;
    unsigned long offset;
    bool need_to_notify = false;

    struct netrx_pending_operations npo = {
        .copy = vif->grant_copy_op,
        .meta = vif->meta,
    };

    skb_queue_head_init(&rxq);

    while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
        RING_IDX max_slots_needed;
        RING_IDX old_req_cons;
        RING_IDX ring_slots_used;
        int i;

        /* We need a cheap worst-case estimate for the number of
         * slots we'll use.
         */
        max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
                                        skb_headlen(skb),
                                        PAGE_SIZE);
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
            unsigned int size;
            unsigned int offset;

            size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
            offset = skb_shinfo(skb)->frags[i].page_offset;

            /* For a worst-case estimate we need to factor in
             * the fragment page offset as this will affect the
             * number of times xenvif_gop_frag_copy() will
             * call start_new_rx_buffer().
             */
            max_slots_needed += DIV_ROUND_UP(offset + size,
                                             PAGE_SIZE);
        }

        /* To avoid the estimate becoming too pessimistic for some
         * frontends that limit posted rx requests, cap the estimate
         * at MAX_SKB_FRAGS.
         */
        if (max_slots_needed > MAX_SKB_FRAGS)
            max_slots_needed = MAX_SKB_FRAGS;

        /* We may need one more slot for GSO metadata */
        if (skb_is_gso(skb) &&
            (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
             skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
            max_slots_needed++;

        /* If the skb may not fit then bail out now */
        if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
            skb_queue_head(&vif->rx_queue, skb);
            need_to_notify = true;
            vif->rx_last_skb_slots = max_slots_needed;
            break;
        } else
            vif->rx_last_skb_slots = 0;

        old_req_cons = vif->rx.req_cons;
        XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo);
        ring_slots_used = vif->rx.req_cons - old_req_cons;

        BUG_ON(ring_slots_used > max_slots_needed);

        __skb_queue_tail(&rxq, skb);
    }

    BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

    if (!npo.copy_prod)
        goto done;

    BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
    gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

    while ((skb = __skb_dequeue(&rxq)) != NULL) {
        if ((1 << vif->meta[npo.meta_cons].gso_type) &
            vif->gso_prefix_mask) {
            resp = RING_GET_RESPONSE(&vif->rx,
                                     vif->rx.rsp_prod_pvt++);

            resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

            resp->offset = vif->meta[npo.meta_cons].gso_size;
            resp->id = vif->meta[npo.meta_cons].id;
            resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

            npo.meta_cons++;
            XENVIF_RX_CB(skb)->meta_slots_used--;
        }

        vif->dev->stats.tx_bytes += skb->len;
        vif->dev->stats.tx_packets++;

        status = xenvif_check_gop(vif,
                                  XENVIF_RX_CB(skb)->meta_slots_used,
                                  &npo);

        if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
            flags = 0;
        else
            flags = XEN_NETRXF_more_data;

        if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
            flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
        else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
            /* remote but checksummed. */
            flags |= XEN_NETRXF_data_validated;

        offset = 0;
        resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
                                status, offset,
                                vif->meta[npo.meta_cons].size,
                                flags);

        if ((1 << vif->meta[npo.meta_cons].gso_type) &
            vif->gso_mask) {
            struct xen_netif_extra_info *gso =
                (struct xen_netif_extra_info *)
                RING_GET_RESPONSE(&vif->rx,
                                  vif->rx.rsp_prod_pvt++);

            resp->flags |= XEN_NETRXF_extra_info;

            gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
            gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
            gso->u.gso.pad = 0;
            gso->u.gso.features = 0;

            gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
            gso->flags = 0;
        }

        xenvif_add_frag_responses(vif, status,
                                  vif->meta + npo.meta_cons + 1,
                                  XENVIF_RX_CB(skb)->meta_slots_used);

        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

        need_to_notify |= !!ret;

        npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
        dev_kfree_skb(skb);
    }

done:
    if (need_to_notify)
        notify_remote_via_irq(vif->rx_irq);
}
void xenvif_napi_schedule_or_enable_events(struct xenvif *vif)
{
    int more_to_do;

    RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

    if (more_to_do)
        napi_schedule(&vif->napi);
}

static void tx_add_credit(struct xenvif *vif)
{
    unsigned long max_burst, max_credit;

    /*
     * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
     * Otherwise the interface can seize up due to insufficient credit.
     */
    max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
    max_burst = min(max_burst, 131072UL);
    max_burst = max(max_burst, vif->credit_bytes);

    /* Take care that adding a new chunk of credit doesn't wrap to zero. */
    max_credit = vif->remaining_credit + vif->credit_bytes;
    if (max_credit < vif->remaining_credit)
        max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

    vif->remaining_credit = min(max_credit, max_burst);
}
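
/* For example, assuming credit_bytes == 65536 and the next queued
 * request has size 4096: max_burst = max(min(4096, 131072), 65536)
 * == 65536, so remaining_credit is topped up by credit_bytes but never
 * beyond one burst's worth, and the unsigned-wrap check clamps the sum
 * to ULONG_MAX instead of letting it wrap past zero.
 */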
static void tx_credit_callback(unsigned long data)
{
    struct xenvif *vif = (struct xenvif *)data;
    tx_add_credit(vif);
    xenvif_napi_schedule_or_enable_events(vif);
}

static void xenvif_tx_err(struct xenvif *vif,
                          struct xen_netif_tx_request *txp, RING_IDX end)
{
    RING_IDX cons = vif->tx.req_cons;
    unsigned long flags;

    do {
        spin_lock_irqsave(&vif->response_lock, flags);
        make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
        spin_unlock_irqrestore(&vif->response_lock, flags);
        if (cons == end)
            break;
        txp = RING_GET_REQUEST(&vif->tx, cons++);
    } while (1);
    vif->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
    netdev_err(vif->dev, "fatal error; disabling device\n");
    vif->disabled = true;
    xenvif_kick_thread(vif);
}
static int xenvif_count_requests(struct xenvif *vif,
                                 struct xen_netif_tx_request *first,
                                 struct xen_netif_tx_request *txp,
                                 int work_to_do)
{
    RING_IDX cons = vif->tx.req_cons;
    int slots = 0;
    int drop_err = 0;
    int more_data;

    if (!(first->flags & XEN_NETTXF_more_data))
        return 0;

    do {
        struct xen_netif_tx_request dropped_tx = { 0 };

        if (slots >= work_to_do) {
            netdev_err(vif->dev,
                       "Asked for %d slots but exceeds this limit\n",
                       work_to_do);
            xenvif_fatal_tx_err(vif);
            return -ENODATA;
        }

        /* This guest is really using too many slots and
         * is considered malicious.
         */
        if (unlikely(slots >= fatal_skb_slots)) {
            netdev_err(vif->dev,
                       "Malicious frontend using %d slots, threshold %u\n",
                       slots, fatal_skb_slots);
            xenvif_fatal_tx_err(vif);
            return -E2BIG;
        }

        /* The Xen network protocol had an implicit dependency on
         * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
         * the historical MAX_SKB_FRAGS value 18 to honor the
         * same behavior as before. Any packet using more than
         * 18 slots but less than fatal_skb_slots slots is
         * dropped.
         */
        if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
            if (net_ratelimit())
                netdev_dbg(vif->dev,
                           "Too many slots (%d) exceeding limit (%d), dropping packet\n",
                           slots, XEN_NETBK_LEGACY_SLOTS_MAX);
            drop_err = -E2BIG;
        }

        if (drop_err)
            txp = &dropped_tx;

        memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
               sizeof(*txp));

        /* If the guest submitted a frame >= 64 KiB then
         * first->size overflowed and following slots will
         * appear to be larger than the frame.
         *
         * This cannot be a fatal error as there are buggy
         * frontends that do this.
         *
         * Consume all slots and drop the packet.
         */
        if (!drop_err && txp->size > first->size) {
            if (net_ratelimit())
                netdev_dbg(vif->dev,
                           "Invalid tx request, slot size %u > remaining size %u\n",
                           txp->size, first->size);
            drop_err = -EIO;
        }

        first->size -= txp->size;
        slots++;

        if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
            netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
                       txp->offset, txp->size);
            xenvif_fatal_tx_err(vif);
            return -EINVAL;
        }

        more_data = txp->flags & XEN_NETTXF_more_data;

        if (!drop_err)
            txp++;

    } while (more_data);

    if (drop_err) {
        xenvif_tx_err(vif, first, cons + slots);
        return drop_err;
    }

    return slots;
}
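
/* Return value summary: 0 when the first request has no more_data
 * chained to it, a positive count of the additional slots consumed on
 * success, or a negative errno when the chain was malformed; non-fatal
 * oversized chains are errored back to the guest via xenvif_tx_err()
 * while fatal errors disable the vif.
 */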
struct xenvif_tx_cb {
    u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)

static inline void xenvif_tx_create_map_op(struct xenvif *vif,
                                           u16 pending_idx,
                                           struct xen_netif_tx_request *txp,
                                           struct gnttab_map_grant_ref *mop)
{
    vif->pages_to_map[mop-vif->tx_map_ops] = vif->mmap_pages[pending_idx];
    gnttab_set_map_op(mop, idx_to_kaddr(vif, pending_idx),
                      GNTMAP_host_map | GNTMAP_readonly,
                      txp->gref, vif->domid);

    memcpy(&vif->pending_tx_info[pending_idx].req, txp,
           sizeof(*txp));
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
    struct sk_buff *skb =
        alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
                  GFP_ATOMIC | __GFP_NOWARN);
    if (unlikely(skb == NULL))
        return NULL;

    /* Packets passed to netif_rx() must have some headroom. */
    skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

    /* Initialize it here to avoid later surprises */
    skb_shinfo(skb)->destructor_arg = NULL;

    return skb;
}
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif *vif,
                                                        struct sk_buff *skb,
                                                        struct xen_netif_tx_request *txp,
                                                        struct gnttab_map_grant_ref *gop)
{
    struct skb_shared_info *shinfo = skb_shinfo(skb);
    skb_frag_t *frags = shinfo->frags;
    u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
    int start;
    pending_ring_idx_t index;
    unsigned int nr_slots, frag_overflow = 0;

    /* At this point shinfo->nr_frags is in fact the number of
     * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
     */
    if (shinfo->nr_frags > MAX_SKB_FRAGS) {
        frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
        BUG_ON(frag_overflow > MAX_SKB_FRAGS);
        shinfo->nr_frags = MAX_SKB_FRAGS;
    }
    nr_slots = shinfo->nr_frags;

    /* Skip first skb fragment if it is on same page as header fragment. */
    start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

    for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
         shinfo->nr_frags++, txp++, gop++) {
        index = pending_index(vif->pending_cons++);
        pending_idx = vif->pending_ring[index];
        xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
        frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
    }

    if (frag_overflow) {
        struct sk_buff *nskb = xenvif_alloc_skb(0);
        if (unlikely(nskb == NULL)) {
            if (net_ratelimit())
                netdev_err(vif->dev,
                           "Can't allocate the frag_list skb.\n");
            return NULL;
        }

        shinfo = skb_shinfo(nskb);
        frags = shinfo->frags;

        for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
             shinfo->nr_frags++, txp++, gop++) {
            index = pending_index(vif->pending_cons++);
            pending_idx = vif->pending_ring[index];
            xenvif_tx_create_map_op(vif, pending_idx, txp, gop);
            frag_set_pending_idx(&frags[shinfo->nr_frags],
                                 pending_idx);
        }

        skb_shinfo(skb)->frag_list = nskb;
    }

    return gop;
}
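
/* Slots beyond MAX_SKB_FRAGS cannot be represented as frags of the
 * original skb, so they are parked in a second, freshly allocated skb
 * hung off skb_shinfo(skb)->frag_list; xenvif_handle_frag_list() later
 * coalesces the two back into a single skb with local pages.
 */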
static inline void xenvif_grant_handle_set(struct xenvif *vif,
                                           u16 pending_idx,
                                           grant_handle_t handle)
{
    if (unlikely(vif->grant_tx_handle[pending_idx] !=
                 NETBACK_INVALID_HANDLE)) {
        netdev_err(vif->dev,
                   "Trying to overwrite active handle! pending_idx: %x\n",
                   pending_idx);
        BUG();
    }
    vif->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif *vif,
                                             u16 pending_idx)
{
    if (unlikely(vif->grant_tx_handle[pending_idx] ==
                 NETBACK_INVALID_HANDLE)) {
        netdev_err(vif->dev,
                   "Trying to unmap invalid handle! pending_idx: %x\n",
                   pending_idx);
        BUG();
    }
    vif->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
static int xenvif_tx_check_gop(struct xenvif *vif,
                               struct sk_buff *skb,
                               struct gnttab_map_grant_ref **gopp_map,
                               struct gnttab_copy **gopp_copy)
{
    struct gnttab_map_grant_ref *gop_map = *gopp_map;
    u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
    struct skb_shared_info *shinfo = skb_shinfo(skb);
    int nr_frags = shinfo->nr_frags;
    int i, err;
    struct sk_buff *first_skb = NULL;

    /* Check status of header. Note: the status and ref are read before
     * the copy-op pointer is advanced, so the message below reports the
     * failing op rather than its successor.
     */
    err = (*gopp_copy)->status;
    if (unlikely(err)) {
        if (net_ratelimit())
            netdev_dbg(vif->dev,
                       "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
                       (*gopp_copy)->status,
                       pending_idx,
                       (*gopp_copy)->source.u.ref);
        xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
    }
    (*gopp_copy)++;

check_frags:
    for (i = 0; i < nr_frags; i++, gop_map++) {
        int j, newerr;

        pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

        /* Check error status: if okay then remember grant handle. */
        newerr = gop_map->status;

        if (likely(!newerr)) {
            xenvif_grant_handle_set(vif,
                                    pending_idx,
                                    gop_map->handle);
            /* Had a previous error? Invalidate this fragment. */
            if (unlikely(err))
                xenvif_idx_unmap(vif, pending_idx);
            continue;
        }

        /* Error on this fragment: respond to client with an error. */
        if (net_ratelimit())
            netdev_dbg(vif->dev,
                       "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
                       i,
                       gop_map->status,
                       pending_idx,
                       gop_map->ref);
        xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);

        /* Not the first error? Preceding frags already invalidated. */
        if (err)
            continue;

        /* First error: invalidate preceding fragments. */
        for (j = 0; j < i; j++) {
            pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
            xenvif_idx_unmap(vif, pending_idx);
        }

        /* Remember the error: invalidate all subsequent fragments. */
        err = newerr;
    }

    if (skb_has_frag_list(skb)) {
        first_skb = skb;
        skb = shinfo->frag_list;
        shinfo = skb_shinfo(skb);
        nr_frags = shinfo->nr_frags;
        goto check_frags;
    }

    /* There was a mapping error in the frag_list skb. We have to unmap
     * the first skb's frags.
     */
    if (first_skb && err) {
        int j;
        shinfo = skb_shinfo(first_skb);
        for (j = 0; j < shinfo->nr_frags; j++) {
            pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
            xenvif_idx_unmap(vif, pending_idx);
        }
    }

    *gopp_map = gop_map;
    return err;
}
static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
{
    struct skb_shared_info *shinfo = skb_shinfo(skb);
    int nr_frags = shinfo->nr_frags;
    int i;
    u16 prev_pending_idx = INVALID_PENDING_IDX;

    for (i = 0; i < nr_frags; i++) {
        skb_frag_t *frag = shinfo->frags + i;
        struct xen_netif_tx_request *txp;
        struct page *page;
        u16 pending_idx;

        pending_idx = frag_get_pending_idx(frag);

        /* If this is not the first frag, chain it to the previous */
        if (prev_pending_idx == INVALID_PENDING_IDX)
            skb_shinfo(skb)->destructor_arg =
                &callback_param(vif, pending_idx);
        else
            callback_param(vif, prev_pending_idx).ctx =
                &callback_param(vif, pending_idx);

        callback_param(vif, pending_idx).ctx = NULL;
        prev_pending_idx = pending_idx;

        txp = &vif->pending_tx_info[pending_idx].req;
        page = virt_to_page(idx_to_kaddr(vif, pending_idx));
        __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
        skb->len += txp->size;
        skb->data_len += txp->size;
        skb->truesize += txp->size;

        /* Take an extra reference to offset network stack's put_page */
        get_page(vif->mmap_pages[pending_idx]);
    }
    /* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
     * overlaps with "index", and "mapping" is not set. I think mapping
     * should be set. If delivered to local stack, it would drop this
     * skb in sk_filter unless the socket has the right to use it.
     */
    skb->pfmemalloc = false;
}
static int xenvif_get_extras(struct xenvif *vif,
                             struct xen_netif_extra_info *extras,
                             int work_to_do)
{
    struct xen_netif_extra_info extra;
    RING_IDX cons = vif->tx.req_cons;

    do {
        if (unlikely(work_to_do-- <= 0)) {
            netdev_err(vif->dev, "Missing extra info\n");
            xenvif_fatal_tx_err(vif);
            return -EBADR;
        }

        memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
               sizeof(extra));
        if (unlikely(!extra.type ||
                     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
            vif->tx.req_cons = ++cons;
            netdev_err(vif->dev,
                       "Invalid extra type: %d\n", extra.type);
            xenvif_fatal_tx_err(vif);
            return -EINVAL;
        }

        memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
        vif->tx.req_cons = ++cons;
    } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

    return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
                              struct sk_buff *skb,
                              struct xen_netif_extra_info *gso)
{
    if (!gso->u.gso.size) {
        netdev_err(vif->dev, "GSO size must not be zero.\n");
        xenvif_fatal_tx_err(vif);
        return -EINVAL;
    }

    switch (gso->u.gso.type) {
    case XEN_NETIF_GSO_TYPE_TCPV4:
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
        break;
    case XEN_NETIF_GSO_TYPE_TCPV6:
        skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
        break;
    default:
        netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
        xenvif_fatal_tx_err(vif);
        return -EINVAL;
    }

    skb_shinfo(skb)->gso_size = gso->u.gso.size;
    /* gso_segs will be calculated later */

    return 0;
}
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
    bool recalculate_partial_csum = false;

    /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
     * peers can fail to set NETRXF_csum_blank when sending a GSO
     * frame. In this case force the SKB to CHECKSUM_PARTIAL and
     * recalculate the partial checksum.
     */
    if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
        vif->rx_gso_checksum_fixup++;
        skb->ip_summed = CHECKSUM_PARTIAL;
        recalculate_partial_csum = true;
    }

    /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
    if (skb->ip_summed != CHECKSUM_PARTIAL)
        return 0;

    return skb_checksum_setup(skb, recalculate_partial_csum);
}
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
    u64 now = get_jiffies_64();
    u64 next_credit = vif->credit_window_start +
        msecs_to_jiffies(vif->credit_usec / 1000);

    /* Timer could already be pending in rare cases. */
    if (timer_pending(&vif->credit_timeout))
        return true;

    /* Passed the point where we can replenish credit? */
    if (time_after_eq64(now, next_credit)) {
        vif->credit_window_start = now;
        tx_add_credit(vif);
    }

    /* Still too big to send right now? Set a callback. */
    if (size > vif->remaining_credit) {
        vif->credit_timeout.data =
            (unsigned long)vif;
        vif->credit_timeout.function =
            tx_credit_callback;
        mod_timer(&vif->credit_timeout,
                  next_credit);
        vif->credit_window_start = next_credit;

        return true;
    }

    return false;
}
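
/* Example, assuming credit_usec == 10000: the replenish window is
 * msecs_to_jiffies(10), i.e. 10 ms. A request arriving before
 * credit_window_start + 10 ms that exceeds remaining_credit is held
 * back, and tx_credit_callback() re-schedules NAPI once the timer
 * fires at the start of the next window.
 */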
static void xenvif_tx_build_gops(struct xenvif *vif,
                                 int budget,
                                 unsigned *copy_ops,
                                 unsigned *map_ops)
{
    struct gnttab_map_grant_ref *gop = vif->tx_map_ops, *request_gop;
    struct sk_buff *skb;
    int ret;

    while (skb_queue_len(&vif->tx_queue) < budget) {
        struct xen_netif_tx_request txreq;
        struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
        struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
        u16 pending_idx;
        RING_IDX idx;
        int work_to_do;
        unsigned int data_len;
        pending_ring_idx_t index;

        if (vif->tx.sring->req_prod - vif->tx.req_cons >
            XEN_NETIF_TX_RING_SIZE) {
            netdev_err(vif->dev,
                       "Impossible number of requests. "
                       "req_prod %d, req_cons %d, size %ld\n",
                       vif->tx.sring->req_prod, vif->tx.req_cons,
                       XEN_NETIF_TX_RING_SIZE);
            xenvif_fatal_tx_err(vif);
            break;
        }

        work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
        if (!work_to_do)
            break;

        idx = vif->tx.req_cons;
        rmb(); /* Ensure that we see the request before we copy it. */
        memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));

        /* Credit-based scheduling. */
        if (txreq.size > vif->remaining_credit &&
            tx_credit_exceeded(vif, txreq.size))
            break;

        vif->remaining_credit -= txreq.size;

        work_to_do--;
        vif->tx.req_cons = ++idx;

        memset(extras, 0, sizeof(extras));
        if (txreq.flags & XEN_NETTXF_extra_info) {
            work_to_do = xenvif_get_extras(vif, extras,
                                           work_to_do);
            idx = vif->tx.req_cons;
            if (unlikely(work_to_do < 0))
                break;
        }

        ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
        if (unlikely(ret < 0))
            break;

        idx += ret;

        if (unlikely(txreq.size < ETH_HLEN)) {
            netdev_dbg(vif->dev,
                       "Bad packet size: %d\n", txreq.size);
            xenvif_tx_err(vif, &txreq, idx);
            break;
        }

        /* No crossing a page as the payload mustn't fragment. */
        if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
            netdev_err(vif->dev,
                       "txreq.offset: %x, size: %u, end: %lu\n",
                       txreq.offset, txreq.size,
                       (txreq.offset&~PAGE_MASK) + txreq.size);
            xenvif_fatal_tx_err(vif);
            break;
        }

        index = pending_index(vif->pending_cons);
        pending_idx = vif->pending_ring[index];

        data_len = (txreq.size > PKT_PROT_LEN &&
                    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
            PKT_PROT_LEN : txreq.size;

        skb = xenvif_alloc_skb(data_len);
        if (unlikely(skb == NULL)) {
            netdev_dbg(vif->dev,
                       "Can't allocate a skb in start_xmit.\n");
            xenvif_tx_err(vif, &txreq, idx);
            break;
        }

        if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
            struct xen_netif_extra_info *gso;
            gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

            if (xenvif_set_skb_gso(vif, skb, gso)) {
                /* Failure in xenvif_set_skb_gso is fatal. */
                kfree_skb(skb);
                break;
            }
        }

        XENVIF_TX_CB(skb)->pending_idx = pending_idx;

        __skb_put(skb, data_len);
        vif->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
        vif->tx_copy_ops[*copy_ops].source.domid = vif->domid;
        vif->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

        vif->tx_copy_ops[*copy_ops].dest.u.gmfn =
            virt_to_mfn(skb->data);
        vif->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
        vif->tx_copy_ops[*copy_ops].dest.offset =
            offset_in_page(skb->data);

        vif->tx_copy_ops[*copy_ops].len = data_len;
        vif->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

        (*copy_ops)++;

        skb_shinfo(skb)->nr_frags = ret;
        if (data_len < txreq.size) {
            skb_shinfo(skb)->nr_frags++;
            frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                 pending_idx);
            xenvif_tx_create_map_op(vif, pending_idx, &txreq, gop);
            gop++;
        } else {
            frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
                                 INVALID_PENDING_IDX);
            memcpy(&vif->pending_tx_info[pending_idx].req, &txreq,
                   sizeof(txreq));
        }

        vif->pending_cons++;

        request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
        if (request_gop == NULL) {
            kfree_skb(skb);
            xenvif_tx_err(vif, &txreq, idx);
            break;
        }
        gop = request_gop;

        __skb_queue_tail(&vif->tx_queue, skb);

        vif->tx.req_cons = idx;

        if (((gop-vif->tx_map_ops) >= ARRAY_SIZE(vif->tx_map_ops)) ||
            (*copy_ops >= ARRAY_SIZE(vif->tx_copy_ops)))
            break;
    }

    (*map_ops) = gop - vif->tx_map_ops;
    return;
}
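
/* The TX path built above is two-phase: up to PKT_PROT_LEN of each
 * packet's header is grant-copied into the locally allocated skb (so
 * the stack can parse headers without touching foreign memory), while
 * the remaining payload is grant-mapped read-only and attached as
 * frags, keeping the bulk of the data zero-copy.
 */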
/* Consolidate an skb with a frag_list into a brand new one with local pages
 * on frags. Returns 0, or -ENOMEM if it can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif *vif, struct sk_buff *skb)
{
    unsigned int offset = skb_headlen(skb);
    skb_frag_t frags[MAX_SKB_FRAGS];
    int i;
    struct ubuf_info *uarg;
    struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

    vif->tx_zerocopy_sent += 2;
    vif->tx_frag_overflow++;

    xenvif_fill_frags(vif, nskb);
    /* Subtract frags size, we will correct it later */
    skb->truesize -= skb->data_len;
    skb->len += nskb->len;
    skb->data_len += nskb->len;

    /* create a brand new frags array and coalesce there */
    for (i = 0; offset < skb->len; i++) {
        struct page *page;
        unsigned int len;

        BUG_ON(i >= MAX_SKB_FRAGS);
        page = alloc_page(GFP_ATOMIC|__GFP_COLD);
        if (!page) {
            int j;
            skb->truesize += skb->data_len;
            for (j = 0; j < i; j++)
                put_page(frags[j].page.p);
            return -ENOMEM;
        }

        if (offset + PAGE_SIZE < skb->len)
            len = PAGE_SIZE;
        else
            len = skb->len - offset;
        if (skb_copy_bits(skb, offset, page_address(page), len))
            BUG();

        offset += len;
        frags[i].page.p = page;
        frags[i].page_offset = 0;
        skb_frag_size_set(&frags[i], len);
    }
    /* swap out with old one */
    memcpy(skb_shinfo(skb)->frags,
           frags,
           i * sizeof(skb_frag_t));
    skb_shinfo(skb)->nr_frags = i;
    skb->truesize += i * PAGE_SIZE;

    /* remove traces of mapped pages and frag_list */
    skb_frag_list_init(skb);
    uarg = skb_shinfo(skb)->destructor_arg;
    uarg->callback(uarg, true);
    skb_shinfo(skb)->destructor_arg = NULL;

    skb_shinfo(nskb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
    kfree_skb(nskb);

    return 0;
}
static int xenvif_tx_submit(struct xenvif *vif)
{
    struct gnttab_map_grant_ref *gop_map = vif->tx_map_ops;
    struct gnttab_copy *gop_copy = vif->tx_copy_ops;
    struct sk_buff *skb;
    int work_done = 0;

    while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
        struct xen_netif_tx_request *txp;
        u16 pending_idx;
        unsigned data_len;

        pending_idx = XENVIF_TX_CB(skb)->pending_idx;
        txp = &vif->pending_tx_info[pending_idx].req;

        /* Check the remap error code. */
        if (unlikely(xenvif_tx_check_gop(vif, skb, &gop_map, &gop_copy))) {
            skb_shinfo(skb)->nr_frags = 0;
            kfree_skb(skb);
            continue;
        }

        data_len = skb->len;
        callback_param(vif, pending_idx).ctx = NULL;
        if (data_len < txp->size) {
            /* Append the packet payload as a fragment. */
            txp->offset += data_len;
            txp->size -= data_len;
        } else {
            /* Schedule a response immediately. */
            xenvif_idx_release(vif, pending_idx,
                               XEN_NETIF_RSP_OKAY);
        }

        if (txp->flags & XEN_NETTXF_csum_blank)
            skb->ip_summed = CHECKSUM_PARTIAL;
        else if (txp->flags & XEN_NETTXF_data_validated)
            skb->ip_summed = CHECKSUM_UNNECESSARY;

        xenvif_fill_frags(vif, skb);

        if (unlikely(skb_has_frag_list(skb))) {
            if (xenvif_handle_frag_list(vif, skb)) {
                if (net_ratelimit())
                    netdev_err(vif->dev,
                               "Not enough memory to consolidate frag_list!\n");
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
                kfree_skb(skb);
                continue;
            }
        }

        if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
            int target = min_t(int, skb->len, PKT_PROT_LEN);
            __pskb_pull_tail(skb, target - skb_headlen(skb));
        }

        skb->dev = vif->dev;
        skb->protocol = eth_type_trans(skb, skb->dev);
        skb_reset_network_header(skb);

        if (checksum_setup(vif, skb)) {
            netdev_dbg(vif->dev,
                       "Can't setup checksum in net_tx_action\n");
            /* We have to set this flag to trigger the callback */
            if (skb_shinfo(skb)->destructor_arg)
                skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
            kfree_skb(skb);
            continue;
        }

        skb_probe_transport_header(skb, 0);

        /* If the packet is GSO then we will have just set up the
         * transport header offset in checksum_setup so it's now
         * straightforward to calculate gso_segs.
         */
        if (skb_is_gso(skb)) {
            int mss = skb_shinfo(skb)->gso_size;
            int hdrlen = skb_transport_header(skb) -
                skb_mac_header(skb) +
                tcp_hdrlen(skb);

            skb_shinfo(skb)->gso_segs =
                DIV_ROUND_UP(skb->len - hdrlen, mss);
        }

        vif->dev->stats.rx_bytes += skb->len;
        vif->dev->stats.rx_packets++;

        work_done++;

        /* Set this flag right before netif_receive_skb, otherwise
         * someone might think this packet already left netback, and
         * do a skb_copy_ubufs while we are still in control of the
         * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
         */
        if (skb_shinfo(skb)->destructor_arg) {
            skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
            vif->tx_zerocopy_sent++;
        }

        netif_receive_skb(skb);
    }

    return work_done;
}
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif *vif = ubuf_to_vif(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&vif->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;
		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(vif->dealloc_prod - vif->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(vif->dealloc_prod);
		vif->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		vif->dealloc_prod++;
	} while (ubuf);
	wake_up(&vif->dealloc_wq);
	spin_unlock_irqrestore(&vif->callback_lock, flags);

	if (likely(zerocopy_success))
		vif->tx_zerocopy_success++;
	else
		vif->tx_zerocopy_fail++;
}
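/* Drain the dealloc ring: batch an unmap operation for every grant whose
 * zerocopy callback has fired, submit the batch in a single
 * gnttab_unmap_refs() call, then release the pending indices back to the
 * pending ring.
 */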
static inline void xenvif_tx_dealloc_action(struct xenvif *vif)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = vif->dealloc_cons;
	gop = vif->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = vif->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - vif->tx_unmap_ops > MAX_PENDING_REQS);
			pending_idx =
				vif->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - vif->tx_unmap_ops] =
				pending_idx;
			vif->pages_to_unmap[gop - vif->tx_unmap_ops] =
				vif->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(vif, pending_idx),
					    GNTMAP_host_map,
					    vif->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(vif, pending_idx);
			++gop;
		}
	} while (dp != vif->dealloc_prod);

	vif->dealloc_cons = dc;

	if (gop - vif->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(vif->tx_unmap_ops,
					NULL,
					vif->pages_to_unmap,
					gop - vif->tx_unmap_ops);
		if (ret) {
			netdev_err(vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
				   gop - vif->tx_unmap_ops, ret);
			for (i = 0; i < gop - vif->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(vif->dev,
						   " host_addr: %llx handle: %x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - vif->tx_unmap_ops; ++i)
		xenvif_idx_release(vif, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}
/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif *vif, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(vif)))
		return 0;

	xenvif_tx_build_gops(vif, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(vif->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(vif->tx_map_ops,
				      NULL,
				      vif->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(vif);

	return work_done;
}
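/* Send the frontend a response for a finished request and recycle the
 * pending index. Serialised by response_lock since this is reached both
 * from the TX submit path and from the dealloc thread.
 */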
static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &vif->pending_tx_info[pending_idx];
	spin_lock_irqsave(&vif->response_lock, flags);
	make_tx_response(vif, &pending_tx_info->req, status);
	index = pending_index(vif->pending_prod);
	vif->pending_ring[index] = pending_idx;
	/* TX shouldn't use the index before we give it back here */
	mb();
	vif->pending_prod++;
	spin_unlock_irqrestore(&vif->response_lock, flags);
}
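/* Put a TX response on the shared ring; a request that carried an extra
 * info slot gets an additional XEN_NETIF_RSP_NULL response for that slot.
 * Notifies the frontend over the event channel if the ring macro says so.
 */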
static void make_tx_response(struct xenvif *vif,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = vif->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;
	int notify;

	resp = RING_GET_RESPONSE(&vif->tx, i);
	resp->id = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	vif->tx.rsp_prod_pvt = ++i;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
	if (notify)
		notify_remote_via_irq(vif->tx_irq);
}
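/* Put an RX response on the shared ring. The status field carries the
 * payload size on success, or the negative error code when st < 0.
 */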
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags)
{
	RING_IDX i = vif->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&vif->rx, i);
	resp->offset = offset;
	resp->flags = flags;
	resp->id = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	vif->rx.rsp_prod_pvt = ++i;

	return resp;
}
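/* Synchronously unmap a single granted page and hand its pending index
 * back with an OKAY response. Any unmap failure here is fatal.
 */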
void xenvif_idx_unmap(struct xenvif *vif, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(vif, pending_idx),
			    GNTMAP_host_map,
			    vif->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(vif, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&vif->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}

	xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
}
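/* Wait-condition helpers used by the kthreads and the NAPI poll loop. */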
static inline int rx_work_todo(struct xenvif *vif)
{
	return (!skb_queue_empty(&vif->rx_queue) &&
	       xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots)) ||
	       vif->rx_queue_purge;
}

static inline int tx_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif *vif)
{
	return vif->dealloc_cons != vif->dealloc_prod;
}
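/* Tear down the shared TX/RX ring mappings, skipping any ring that was
 * never mapped.
 */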
void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
	if (vif->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->tx.sring);
	if (vif->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->rx.sring);
}
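/* Map the frontend's TX and RX shared rings from their grant references
 * and initialise the backend ring state. On failure, unmap whatever was
 * already mapped.
 */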
int xenvif_map_frontend_rings(struct xenvif *vif,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     tx_ring_ref, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     rx_ring_ref, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(vif);
	return err;
}
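/* Queue control: stopping is a no-op unless this vif is allowed to queue
 * (can_queue); waking is additionally gated on the vif being schedulable.
 */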
void xenvif_stop_queue(struct xenvif *vif)
{
	if (!vif->can_queue)
		return;

	netif_stop_queue(vif->dev);
}

static void xenvif_start_queue(struct xenvif *vif)
{
	if (xenvif_schedulable(vif))
		netif_wake_queue(vif->dev);
}
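/* Per-vif kernel thread for the guest RX path: drains vif->rx_queue into
 * the shared ring, handles deferred carrier-off for rogue frontends and
 * queue purge requests, and wakes the device queue once the backlog clears.
 */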
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif *vif = data;
	struct sk_buff *skb;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->wq,
					 rx_work_todo(vif) ||
					 vif->disabled ||
					 kthread_should_stop());

		/* This frontend is found to be rogue; disable it in
		 * kthread context. Currently this flag is only set when
		 * netback finds that the frontend sends malformed packets,
		 * but we cannot disable the interface in softirq context,
		 * so we defer it here.
		 */
		if (unlikely(vif->disabled && netif_carrier_ok(vif->dev)))
			xenvif_carrier_off(vif);

		if (kthread_should_stop())
			break;

		if (vif->rx_queue_purge) {
			skb_queue_purge(&vif->rx_queue);
			vif->rx_queue_purge = false;
		}

		if (!skb_queue_empty(&vif->rx_queue))
			xenvif_rx_action(vif);

		if (skb_queue_empty(&vif->rx_queue) &&
		    netif_queue_stopped(vif->dev)) {
			del_timer_sync(&vif->wake_queue);
			xenvif_start_queue(vif);
		}

		cond_resched();
	}

	/* Bin any remaining skbs */
	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
		dev_kfree_skb(skb);

	return 0;
}
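/* Per-vif kernel thread that unmaps grants queued up by
 * xenvif_zerocopy_callback(), with a final pass on shutdown.
 */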
int xenvif_dealloc_kthread(void *data)
{
	struct xenvif *vif = data;

	while (!kthread_should_stop()) {
		wait_event_interruptible(vif->dealloc_wq,
					 tx_dealloc_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		xenvif_tx_dealloc_action(vif);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(vif))
		xenvif_tx_dealloc_action(vif);

	return 0;
}
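/* Module init: bail out on non-Xen systems, clamp the fatal_skb_slots
 * module parameter to at least XEN_NETBK_LEGACY_SLOTS_MAX, and register
 * the xenbus backend driver.
 */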
static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

	rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);
static void __exit netback_fini(void)
{
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");