netback.c

  1. /*
  2. * Back-end of the driver for virtual network devices. This portion of the
  3. * driver exports a 'unified' network-device interface that can be accessed
  4. * by any operating system that implements a compatible front end. A
  5. * reference front-end implementation can be found in:
  6. * drivers/net/xen-netfront.c
  7. *
  8. * Copyright (c) 2002-2005, K A Fraser
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License version 2
  12. * as published by the Free Software Foundation; or, when distributed
  13. * separately from the Linux kernel or incorporated into other
  14. * software packages, subject to the following license:
  15. *
  16. * Permission is hereby granted, free of charge, to any person obtaining a copy
  17. * of this source file (the "Software"), to deal in the Software without
  18. * restriction, including without limitation the rights to use, copy, modify,
  19. * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  20. * and to permit persons to whom the Software is furnished to do so, subject to
  21. * the following conditions:
  22. *
  23. * The above copyright notice and this permission notice shall be included in
  24. * all copies or substantial portions of the Software.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  27. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  28. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  29. * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  30. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  31. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  32. * IN THE SOFTWARE.
  33. */
  34. #include "common.h"
  35. #include <linux/kthread.h>
  36. #include <linux/if_vlan.h>
  37. #include <linux/udp.h>
  38. #include <linux/highmem.h>
  39. #include <net/tcp.h>
  40. #include <xen/xen.h>
  41. #include <xen/events.h>
  42. #include <xen/interface/memory.h>
  43. #include <xen/page.h>
  44. #include <asm/xen/hypercall.h>
  45. /* Provide an option to disable split event channels at load time, as
  46. * event channels are a limited resource. Split event channels are
  47. * enabled by default.
  48. */
  49. bool separate_tx_rx_irq = true;
  50. module_param(separate_tx_rx_irq, bool, 0644);
  51. /* The time that packets can stay on the guest Rx internal queue
  52. * before they are dropped.
  53. */
  54. unsigned int rx_drain_timeout_msecs = 10000;
  55. module_param(rx_drain_timeout_msecs, uint, 0444);
  56. /* The length of time before the frontend is considered unresponsive
  57. * because it isn't providing Rx slots.
  58. */
  59. unsigned int rx_stall_timeout_msecs = 60000;
  60. module_param(rx_stall_timeout_msecs, uint, 0444);
  61. unsigned int xenvif_max_queues;
  62. module_param_named(max_queues, xenvif_max_queues, uint, 0644);
  63. MODULE_PARM_DESC(max_queues,
  64. "Maximum number of queues per virtual interface");
  65. /*
  66. * This is the maximum number of slots an skb can have. If a guest sends
  67. * an skb which exceeds this limit, it is considered malicious.
  68. */
  69. #define FATAL_SKB_SLOTS_DEFAULT 20
  70. static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
  71. module_param(fatal_skb_slots, uint, 0444);
  72. /* The amount to copy out of the first guest Tx slot into the skb's
  73. * linear area. If the first slot has more data, it will be mapped
  74. * and put into the first frag.
  75. *
  76. * This is sized to avoid pulling headers from the frags for most
  77. * TCP/IP packets.
  78. */
  79. #define XEN_NETBACK_TX_COPY_LEN 128
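/* Illustrative example of the split described above (numbers are only an
 * example): a 1514-byte first slot results in a 128-byte grant copy into
 * skb->data plus a grant map of the same slot that becomes frag 0, with
 * txreq.offset advanced by the copied length in xenvif_tx_submit(). A first
 * slot of 128 bytes or less is copied in full and no map op is created for it.
 */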
  80. static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
  81. u8 status);
  82. static void make_tx_response(struct xenvif_queue *queue,
  83. struct xen_netif_tx_request *txp,
  84. s8 st);
  85. static void push_tx_responses(struct xenvif_queue *queue);
  86. static inline int tx_work_todo(struct xenvif_queue *queue);
  87. static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
  88. u16 id,
  89. s8 st,
  90. u16 offset,
  91. u16 size,
  92. u16 flags);
  93. static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
  94. u16 idx)
  95. {
  96. return page_to_pfn(queue->mmap_pages[idx]);
  97. }
  98. static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
  99. u16 idx)
  100. {
  101. return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
  102. }
  103. #define callback_param(vif, pending_idx) \
  104. (vif->pending_tx_info[pending_idx].callback_struct)
  105. /* Find the containing VIF's structure from a pointer in the pending_tx_info array.
  106. */
  107. static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
  108. {
  109. u16 pending_idx = ubuf->desc;
  110. struct pending_tx_info *temp =
  111. container_of(ubuf, struct pending_tx_info, callback_struct);
  112. return container_of(temp - pending_idx,
  113. struct xenvif_queue,
  114. pending_tx_info[0]);
  115. }
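/* ubuf_to_queue() relies on callback_struct being embedded in
 * pending_tx_info[pending_idx]: stepping back pending_idx elements from that
 * entry lands on pending_tx_info[0], whose enclosing structure is the queue.
 */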
  116. static u16 frag_get_pending_idx(skb_frag_t *frag)
  117. {
  118. return (u16)frag->page_offset;
  119. }
  120. static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
  121. {
  122. frag->page_offset = pending_idx;
  123. }
  124. static inline pending_ring_idx_t pending_index(unsigned i)
  125. {
  126. return i & (MAX_PENDING_REQS-1);
  127. }
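/* Note: the mask above assumes MAX_PENDING_REQS is a power of two, so the
 * index simply wraps around the pending ring.
 */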
  128. static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
  129. {
  130. if (vif->gso_mask)
  131. return DIV_ROUND_UP(vif->dev->gso_max_size, XEN_PAGE_SIZE) + 1;
  132. else
  133. return DIV_ROUND_UP(vif->dev->mtu, XEN_PAGE_SIZE);
  134. }
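/* Worked example (assuming 4 KiB Xen pages): with GSO enabled and a
 * gso_max_size of 65536 this is DIV_ROUND_UP(65536, 4096) + 1 = 17 slots,
 * where the extra slot leaves room for the GSO extra-info descriptor; without
 * GSO and a 1500-byte MTU it is DIV_ROUND_UP(1500, 4096) = 1 slot.
 */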
  135. static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
  136. {
  137. RING_IDX prod, cons;
  138. int needed;
  139. needed = xenvif_rx_ring_slots_needed(queue->vif);
  140. do {
  141. prod = queue->rx.sring->req_prod;
  142. cons = queue->rx.req_cons;
  143. if (prod - cons >= needed)
  144. return true;
  145. queue->rx.sring->req_event = prod + 1;
  146. /* Make sure event is visible before we check prod
  147. * again.
  148. */
  149. mb();
  150. } while (queue->rx.sring->req_prod != prod);
  151. return false;
  152. }
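/* The loop above is the usual ring "prepare to wait" pattern: advertise the
 * event threshold (req_event = prod + 1), then re-read req_prod after the
 * barrier so a request that raced with us is not missed. Returning false
 * means the caller may sleep until the frontend's next notification.
 */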
  153. void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
  154. {
  155. unsigned long flags;
  156. spin_lock_irqsave(&queue->rx_queue.lock, flags);
  157. __skb_queue_tail(&queue->rx_queue, skb);
  158. queue->rx_queue_len += skb->len;
  159. if (queue->rx_queue_len > queue->rx_queue_max)
  160. netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
  161. spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
  162. }
  163. static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
  164. {
  165. struct sk_buff *skb;
  166. spin_lock_irq(&queue->rx_queue.lock);
  167. skb = __skb_dequeue(&queue->rx_queue);
  168. if (skb)
  169. queue->rx_queue_len -= skb->len;
  170. spin_unlock_irq(&queue->rx_queue.lock);
  171. return skb;
  172. }
  173. static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
  174. {
  175. spin_lock_irq(&queue->rx_queue.lock);
  176. if (queue->rx_queue_len < queue->rx_queue_max)
  177. netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
  178. spin_unlock_irq(&queue->rx_queue.lock);
  179. }
  180. static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
  181. {
  182. struct sk_buff *skb;
  183. while ((skb = xenvif_rx_dequeue(queue)) != NULL)
  184. kfree_skb(skb);
  185. }
  186. static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
  187. {
  188. struct sk_buff *skb;
  189. for (;;) {
  190. skb = skb_peek(&queue->rx_queue);
  191. if (!skb)
  192. break;
  193. if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
  194. break;
  195. xenvif_rx_dequeue(queue);
  196. kfree_skb(skb);
  197. }
  198. }
  199. struct netrx_pending_operations {
  200. unsigned copy_prod, copy_cons;
  201. unsigned meta_prod, meta_cons;
  202. struct gnttab_copy *copy;
  203. struct xenvif_rx_meta *meta;
  204. int copy_off;
  205. grant_ref_t copy_gref;
  206. };
  207. static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
  208. struct netrx_pending_operations *npo)
  209. {
  210. struct xenvif_rx_meta *meta;
  211. struct xen_netif_rx_request *req;
  212. req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
  213. meta = npo->meta + npo->meta_prod++;
  214. meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
  215. meta->gso_size = 0;
  216. meta->size = 0;
  217. meta->id = req->id;
  218. npo->copy_off = 0;
  219. npo->copy_gref = req->gref;
  220. return meta;
  221. }
  222. struct gop_frag_copy {
  223. struct xenvif_queue *queue;
  224. struct netrx_pending_operations *npo;
  225. struct xenvif_rx_meta *meta;
  226. int head;
  227. int gso_type;
  228. struct page *page;
  229. };
  230. static void xenvif_setup_copy_gop(unsigned long gfn,
  231. unsigned int offset,
  232. unsigned int *len,
  233. struct gop_frag_copy *info)
  234. {
  235. struct gnttab_copy *copy_gop;
  236. struct xen_page_foreign *foreign;
  237. /* Convenient aliases */
  238. struct xenvif_queue *queue = info->queue;
  239. struct netrx_pending_operations *npo = info->npo;
  240. struct page *page = info->page;
  241. BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
  242. if (npo->copy_off == MAX_BUFFER_OFFSET)
  243. info->meta = get_next_rx_buffer(queue, npo);
  244. if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
  245. *len = MAX_BUFFER_OFFSET - npo->copy_off;
  246. copy_gop = npo->copy + npo->copy_prod++;
  247. copy_gop->flags = GNTCOPY_dest_gref;
  248. copy_gop->len = *len;
  249. foreign = xen_page_foreign(page);
  250. if (foreign) {
  251. copy_gop->source.domid = foreign->domid;
  252. copy_gop->source.u.ref = foreign->gref;
  253. copy_gop->flags |= GNTCOPY_source_gref;
  254. } else {
  255. copy_gop->source.domid = DOMID_SELF;
  256. copy_gop->source.u.gmfn = gfn;
  257. }
  258. copy_gop->source.offset = offset;
  259. copy_gop->dest.domid = queue->vif->domid;
  260. copy_gop->dest.offset = npo->copy_off;
  261. copy_gop->dest.u.ref = npo->copy_gref;
  262. npo->copy_off += *len;
  263. info->meta->size += *len;
  264. /* Leave a gap for the GSO descriptor. */
  265. if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
  266. queue->rx.req_cons++;
  267. info->head = 0; /* There must be something in this buffer now */
  268. }
  269. static void xenvif_gop_frag_copy_grant(unsigned long gfn,
  270. unsigned offset,
  271. unsigned int len,
  272. void *data)
  273. {
  274. unsigned int bytes;
  275. while (len) {
  276. bytes = len;
  277. xenvif_setup_copy_gop(gfn, offset, &bytes, data);
  278. offset += bytes;
  279. len -= bytes;
  280. }
  281. }
  282. /*
  283. * Set up the grant operations for this fragment. If it's a flipping
  284. * interface, we also set up the unmap request from here.
  285. */
  286. static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
  287. struct netrx_pending_operations *npo,
  288. struct page *page, unsigned long size,
  289. unsigned long offset, int *head)
  290. {
  291. struct gop_frag_copy info = {
  292. .queue = queue,
  293. .npo = npo,
  294. .head = *head,
  295. .gso_type = XEN_NETIF_GSO_TYPE_NONE,
  296. };
  297. unsigned long bytes;
  298. if (skb_is_gso(skb)) {
  299. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
  300. info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
  301. else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
  302. info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
  303. }
  304. /* Data must not cross a page boundary. */
  305. BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
  306. info.meta = npo->meta + npo->meta_prod - 1;
  307. /* Skip unused frames from start of page */
  308. page += offset >> PAGE_SHIFT;
  309. offset &= ~PAGE_MASK;
  310. while (size > 0) {
  311. BUG_ON(offset >= PAGE_SIZE);
  312. bytes = PAGE_SIZE - offset;
  313. if (bytes > size)
  314. bytes = size;
  315. info.page = page;
  316. gnttab_foreach_grant_in_range(page, offset, bytes,
  317. xenvif_gop_frag_copy_grant,
  318. &info);
  319. size -= bytes;
  320. offset = 0;
  321. /* Next page */
  322. if (size) {
  323. BUG_ON(!PageCompound(page));
  324. page++;
  325. }
  326. }
  327. *head = info.head;
  328. }
  329. /*
  330. * Prepare an SKB to be transmitted to the frontend.
  331. *
  332. * This function is responsible for allocating grant operations, meta
  333. * structures, etc.
  334. *
  335. * It returns the number of meta structures consumed. The number of
  336. * ring slots used is always equal to the number of meta slots used
  337. * plus the number of GSO descriptors used. Currently, we use either
  338. * zero GSO descriptors (for non-GSO packets) or one descriptor (for
  339. * frontend-side LRO).
  340. */
  341. static int xenvif_gop_skb(struct sk_buff *skb,
  342. struct netrx_pending_operations *npo,
  343. struct xenvif_queue *queue)
  344. {
  345. struct xenvif *vif = netdev_priv(skb->dev);
  346. int nr_frags = skb_shinfo(skb)->nr_frags;
  347. int i;
  348. struct xen_netif_rx_request *req;
  349. struct xenvif_rx_meta *meta;
  350. unsigned char *data;
  351. int head = 1;
  352. int old_meta_prod;
  353. int gso_type;
  354. old_meta_prod = npo->meta_prod;
  355. gso_type = XEN_NETIF_GSO_TYPE_NONE;
  356. if (skb_is_gso(skb)) {
  357. if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
  358. gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
  359. else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
  360. gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
  361. }
  362. /* Set up a GSO prefix descriptor, if necessary */
  363. if ((1 << gso_type) & vif->gso_prefix_mask) {
  364. req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
  365. meta = npo->meta + npo->meta_prod++;
  366. meta->gso_type = gso_type;
  367. meta->gso_size = skb_shinfo(skb)->gso_size;
  368. meta->size = 0;
  369. meta->id = req->id;
  370. }
  371. req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
  372. meta = npo->meta + npo->meta_prod++;
  373. if ((1 << gso_type) & vif->gso_mask) {
  374. meta->gso_type = gso_type;
  375. meta->gso_size = skb_shinfo(skb)->gso_size;
  376. } else {
  377. meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
  378. meta->gso_size = 0;
  379. }
  380. meta->size = 0;
  381. meta->id = req->id;
  382. npo->copy_off = 0;
  383. npo->copy_gref = req->gref;
  384. data = skb->data;
  385. while (data < skb_tail_pointer(skb)) {
  386. unsigned int offset = offset_in_page(data);
  387. unsigned int len = PAGE_SIZE - offset;
  388. if (data + len > skb_tail_pointer(skb))
  389. len = skb_tail_pointer(skb) - data;
  390. xenvif_gop_frag_copy(queue, skb, npo,
  391. virt_to_page(data), len, offset, &head);
  392. data += len;
  393. }
  394. for (i = 0; i < nr_frags; i++) {
  395. xenvif_gop_frag_copy(queue, skb, npo,
  396. skb_frag_page(&skb_shinfo(skb)->frags[i]),
  397. skb_frag_size(&skb_shinfo(skb)->frags[i]),
  398. skb_shinfo(skb)->frags[i].page_offset,
  399. &head);
  400. }
  401. return npo->meta_prod - old_meta_prod;
  402. }
  403. /*
  404. * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
  405. * used to set up the operations on the top of
  406. * netrx_pending_operations, which have since been done. Check that
  407. * they didn't give any errors and advance over them.
  408. */
  409. static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
  410. struct netrx_pending_operations *npo)
  411. {
  412. struct gnttab_copy *copy_op;
  413. int status = XEN_NETIF_RSP_OKAY;
  414. int i;
  415. for (i = 0; i < nr_meta_slots; i++) {
  416. copy_op = npo->copy + npo->copy_cons++;
  417. if (copy_op->status != GNTST_okay) {
  418. netdev_dbg(vif->dev,
  419. "Bad status %d from copy to DOM%d.\n",
  420. copy_op->status, vif->domid);
  421. status = XEN_NETIF_RSP_ERROR;
  422. }
  423. }
  424. return status;
  425. }
  426. static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
  427. struct xenvif_rx_meta *meta,
  428. int nr_meta_slots)
  429. {
  430. int i;
  431. unsigned long offset;
  432. /* No fragments used */
  433. if (nr_meta_slots <= 1)
  434. return;
  435. nr_meta_slots--;
  436. for (i = 0; i < nr_meta_slots; i++) {
  437. int flags;
  438. if (i == nr_meta_slots - 1)
  439. flags = 0;
  440. else
  441. flags = XEN_NETRXF_more_data;
  442. offset = 0;
  443. make_rx_response(queue, meta[i].id, status, offset,
  444. meta[i].size, flags);
  445. }
  446. }
  447. void xenvif_kick_thread(struct xenvif_queue *queue)
  448. {
  449. wake_up(&queue->wq);
  450. }
  451. static void xenvif_rx_action(struct xenvif_queue *queue)
  452. {
  453. s8 status;
  454. u16 flags;
  455. struct xen_netif_rx_response *resp;
  456. struct sk_buff_head rxq;
  457. struct sk_buff *skb;
  458. LIST_HEAD(notify);
  459. int ret;
  460. unsigned long offset;
  461. bool need_to_notify = false;
  462. struct netrx_pending_operations npo = {
  463. .copy = queue->grant_copy_op,
  464. .meta = queue->meta,
  465. };
  466. skb_queue_head_init(&rxq);
  467. while (xenvif_rx_ring_slots_available(queue)
  468. && (skb = xenvif_rx_dequeue(queue)) != NULL) {
  469. queue->last_rx_time = jiffies;
  470. XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
  471. __skb_queue_tail(&rxq, skb);
  472. }
  473. BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
  474. if (!npo.copy_prod)
  475. goto done;
  476. BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
  477. gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
  478. while ((skb = __skb_dequeue(&rxq)) != NULL) {
  479. if ((1 << queue->meta[npo.meta_cons].gso_type) &
  480. queue->vif->gso_prefix_mask) {
  481. resp = RING_GET_RESPONSE(&queue->rx,
  482. queue->rx.rsp_prod_pvt++);
  483. resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
  484. resp->offset = queue->meta[npo.meta_cons].gso_size;
  485. resp->id = queue->meta[npo.meta_cons].id;
  486. resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
  487. npo.meta_cons++;
  488. XENVIF_RX_CB(skb)->meta_slots_used--;
  489. }
  490. queue->stats.tx_bytes += skb->len;
  491. queue->stats.tx_packets++;
  492. status = xenvif_check_gop(queue->vif,
  493. XENVIF_RX_CB(skb)->meta_slots_used,
  494. &npo);
  495. if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
  496. flags = 0;
  497. else
  498. flags = XEN_NETRXF_more_data;
  499. if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
  500. flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
  501. else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
  502. /* remote but checksummed. */
  503. flags |= XEN_NETRXF_data_validated;
  504. offset = 0;
  505. resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
  506. status, offset,
  507. queue->meta[npo.meta_cons].size,
  508. flags);
  509. if ((1 << queue->meta[npo.meta_cons].gso_type) &
  510. queue->vif->gso_mask) {
  511. struct xen_netif_extra_info *gso =
  512. (struct xen_netif_extra_info *)
  513. RING_GET_RESPONSE(&queue->rx,
  514. queue->rx.rsp_prod_pvt++);
  515. resp->flags |= XEN_NETRXF_extra_info;
  516. gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
  517. gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
  518. gso->u.gso.pad = 0;
  519. gso->u.gso.features = 0;
  520. gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
  521. gso->flags = 0;
  522. }
  523. xenvif_add_frag_responses(queue, status,
  524. queue->meta + npo.meta_cons + 1,
  525. XENVIF_RX_CB(skb)->meta_slots_used);
  526. RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
  527. need_to_notify |= !!ret;
  528. npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
  529. dev_kfree_skb(skb);
  530. }
  531. done:
  532. if (need_to_notify)
  533. notify_remote_via_irq(queue->rx_irq);
  534. }
  535. void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
  536. {
  537. int more_to_do;
  538. RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
  539. if (more_to_do)
  540. napi_schedule(&queue->napi);
  541. }
  542. static void tx_add_credit(struct xenvif_queue *queue)
  543. {
  544. unsigned long max_burst, max_credit;
  545. /*
  546. * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
  547. * Otherwise the interface can seize up due to insufficient credit.
  548. */
  549. max_burst = max(131072UL, queue->credit_bytes);
  550. /* Take care that adding a new chunk of credit doesn't wrap to zero. */
  551. max_credit = queue->remaining_credit + queue->credit_bytes;
  552. if (max_credit < queue->remaining_credit)
  553. max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
  554. queue->remaining_credit = min(max_credit, max_burst);
  555. }
  556. void xenvif_tx_credit_callback(unsigned long data)
  557. {
  558. struct xenvif_queue *queue = (struct xenvif_queue *)data;
  559. tx_add_credit(queue);
  560. xenvif_napi_schedule_or_enable_events(queue);
  561. }
  562. static void xenvif_tx_err(struct xenvif_queue *queue,
  563. struct xen_netif_tx_request *txp, RING_IDX end)
  564. {
  565. RING_IDX cons = queue->tx.req_cons;
  566. unsigned long flags;
  567. do {
  568. spin_lock_irqsave(&queue->response_lock, flags);
  569. make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
  570. push_tx_responses(queue);
  571. spin_unlock_irqrestore(&queue->response_lock, flags);
  572. if (cons == end)
  573. break;
  574. txp = RING_GET_REQUEST(&queue->tx, cons++);
  575. } while (1);
  576. queue->tx.req_cons = cons;
  577. }
  578. static void xenvif_fatal_tx_err(struct xenvif *vif)
  579. {
  580. netdev_err(vif->dev, "fatal error; disabling device\n");
  581. vif->disabled = true;
  582. /* Disable the vif from queue 0's kthread */
  583. if (vif->queues)
  584. xenvif_kick_thread(&vif->queues[0]);
  585. }
  586. static int xenvif_count_requests(struct xenvif_queue *queue,
  587. struct xen_netif_tx_request *first,
  588. struct xen_netif_tx_request *txp,
  589. int work_to_do)
  590. {
  591. RING_IDX cons = queue->tx.req_cons;
  592. int slots = 0;
  593. int drop_err = 0;
  594. int more_data;
  595. if (!(first->flags & XEN_NETTXF_more_data))
  596. return 0;
  597. do {
  598. struct xen_netif_tx_request dropped_tx = { 0 };
  599. if (slots >= work_to_do) {
  600. netdev_err(queue->vif->dev,
  601. "Asked for %d slots but exceeds this limit\n",
  602. work_to_do);
  603. xenvif_fatal_tx_err(queue->vif);
  604. return -ENODATA;
  605. }
  606. /* This guest is really using too many slots and is
  607. * considered malicious.
  608. */
  609. if (unlikely(slots >= fatal_skb_slots)) {
  610. netdev_err(queue->vif->dev,
  611. "Malicious frontend using %d slots, threshold %u\n",
  612. slots, fatal_skb_slots);
  613. xenvif_fatal_tx_err(queue->vif);
  614. return -E2BIG;
  615. }
  616. /* The Xen network protocol had an implicit dependency on
  617. * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
  618. * the historical MAX_SKB_FRAGS value 18 to honor the
  619. * same behavior as before. Any packet using more than
  620. * 18 slots but fewer than fatal_skb_slots slots is
  621. * dropped.
  622. */
  623. if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
  624. if (net_ratelimit())
  625. netdev_dbg(queue->vif->dev,
  626. "Too many slots (%d) exceeding limit (%d), dropping packet\n",
  627. slots, XEN_NETBK_LEGACY_SLOTS_MAX);
  628. drop_err = -E2BIG;
  629. }
  630. if (drop_err)
  631. txp = &dropped_tx;
  632. memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
  633. sizeof(*txp));
  634. /* If the guest submitted a frame >= 64 KiB then
  635. * first->size overflowed and following slots will
  636. * appear to be larger than the frame.
  637. *
  638. * This cannot be a fatal error, as there are buggy
  639. * frontends that do this.
  640. *
  641. * Consume all slots and drop the packet.
  642. */
  643. if (!drop_err && txp->size > first->size) {
  644. if (net_ratelimit())
  645. netdev_dbg(queue->vif->dev,
  646. "Invalid tx request, slot size %u > remaining size %u\n",
  647. txp->size, first->size);
  648. drop_err = -EIO;
  649. }
  650. first->size -= txp->size;
  651. slots++;
  652. if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
  653. netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
  654. txp->offset, txp->size);
  655. xenvif_fatal_tx_err(queue->vif);
  656. return -EINVAL;
  657. }
  658. more_data = txp->flags & XEN_NETTXF_more_data;
  659. if (!drop_err)
  660. txp++;
  661. } while (more_data);
  662. if (drop_err) {
  663. xenvif_tx_err(queue, first, cons + slots);
  664. return drop_err;
  665. }
  666. return slots;
  667. }
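/* In outline, the policy above is: asking for more slots than work_to_do,
 * asking for fatal_skb_slots or more, or supplying a slot that crosses a page
 * boundary is treated as a fatal frontend error; using more than
 * XEN_NETBK_LEGACY_SLOTS_MAX slots (while staying below the fatal threshold)
 * or a follow-on slot larger than the remaining size consumes the slots and
 * drops the packet via xenvif_tx_err(); otherwise the slot count is returned.
 */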
  668. struct xenvif_tx_cb {
  669. u16 pending_idx;
  670. };
  671. #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
  672. static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
  673. u16 pending_idx,
  674. struct xen_netif_tx_request *txp,
  675. struct gnttab_map_grant_ref *mop)
  676. {
  677. queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
  678. gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
  679. GNTMAP_host_map | GNTMAP_readonly,
  680. txp->gref, queue->vif->domid);
  681. memcpy(&queue->pending_tx_info[pending_idx].req, txp,
  682. sizeof(*txp));
  683. }
  684. static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
  685. {
  686. struct sk_buff *skb =
  687. alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
  688. GFP_ATOMIC | __GFP_NOWARN);
  689. if (unlikely(skb == NULL))
  690. return NULL;
  691. /* Packets passed to netif_rx() must have some headroom. */
  692. skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
  693. /* Initialize it here to avoid later surprises */
  694. skb_shinfo(skb)->destructor_arg = NULL;
  695. return skb;
  696. }
  697. static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
  698. struct sk_buff *skb,
  699. struct xen_netif_tx_request *txp,
  700. struct gnttab_map_grant_ref *gop,
  701. unsigned int frag_overflow,
  702. struct sk_buff *nskb)
  703. {
  704. struct skb_shared_info *shinfo = skb_shinfo(skb);
  705. skb_frag_t *frags = shinfo->frags;
  706. u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
  707. int start;
  708. pending_ring_idx_t index;
  709. unsigned int nr_slots;
  710. nr_slots = shinfo->nr_frags;
  711. /* Skip the first skb fragment if it is on the same page as the header fragment. */
  712. start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
  713. for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
  714. shinfo->nr_frags++, txp++, gop++) {
  715. index = pending_index(queue->pending_cons++);
  716. pending_idx = queue->pending_ring[index];
  717. xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
  718. frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
  719. }
  720. if (frag_overflow) {
  721. shinfo = skb_shinfo(nskb);
  722. frags = shinfo->frags;
  723. for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
  724. shinfo->nr_frags++, txp++, gop++) {
  725. index = pending_index(queue->pending_cons++);
  726. pending_idx = queue->pending_ring[index];
  727. xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
  728. frag_set_pending_idx(&frags[shinfo->nr_frags],
  729. pending_idx);
  730. }
  731. skb_shinfo(skb)->frag_list = nskb;
  732. }
  733. return gop;
  734. }
  735. static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
  736. u16 pending_idx,
  737. grant_handle_t handle)
  738. {
  739. if (unlikely(queue->grant_tx_handle[pending_idx] !=
  740. NETBACK_INVALID_HANDLE)) {
  741. netdev_err(queue->vif->dev,
  742. "Trying to overwrite active handle! pending_idx: 0x%x\n",
  743. pending_idx);
  744. BUG();
  745. }
  746. queue->grant_tx_handle[pending_idx] = handle;
  747. }
  748. static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
  749. u16 pending_idx)
  750. {
  751. if (unlikely(queue->grant_tx_handle[pending_idx] ==
  752. NETBACK_INVALID_HANDLE)) {
  753. netdev_err(queue->vif->dev,
  754. "Trying to unmap invalid handle! pending_idx: 0x%x\n",
  755. pending_idx);
  756. BUG();
  757. }
  758. queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
  759. }
  760. static int xenvif_tx_check_gop(struct xenvif_queue *queue,
  761. struct sk_buff *skb,
  762. struct gnttab_map_grant_ref **gopp_map,
  763. struct gnttab_copy **gopp_copy)
  764. {
  765. struct gnttab_map_grant_ref *gop_map = *gopp_map;
  766. u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
  767. /* This always points to the shinfo of the skb being checked, which
  768. * could be either the first or the one on the frag_list
  769. */
  770. struct skb_shared_info *shinfo = skb_shinfo(skb);
  771. /* If this is non-NULL, we are currently checking the frag_list skb, and
  772. * this points to the shinfo of the first one
  773. */
  774. struct skb_shared_info *first_shinfo = NULL;
  775. int nr_frags = shinfo->nr_frags;
  776. const bool sharedslot = nr_frags &&
  777. frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
  778. int i, err;
  779. /* Check status of header. */
  780. err = (*gopp_copy)->status;
  781. if (unlikely(err)) {
  782. if (net_ratelimit())
  783. netdev_dbg(queue->vif->dev,
  784. "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
  785. (*gopp_copy)->status,
  786. pending_idx,
  787. (*gopp_copy)->source.u.ref);
  788. /* The first frag might still have this slot mapped */
  789. if (!sharedslot)
  790. xenvif_idx_release(queue, pending_idx,
  791. XEN_NETIF_RSP_ERROR);
  792. }
  793. (*gopp_copy)++;
  794. check_frags:
  795. for (i = 0; i < nr_frags; i++, gop_map++) {
  796. int j, newerr;
  797. pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
  798. /* Check error status: if okay then remember grant handle. */
  799. newerr = gop_map->status;
  800. if (likely(!newerr)) {
  801. xenvif_grant_handle_set(queue,
  802. pending_idx,
  803. gop_map->handle);
  804. /* Had a previous error? Invalidate this fragment. */
  805. if (unlikely(err)) {
  806. xenvif_idx_unmap(queue, pending_idx);
  807. /* If the mapping of the first frag was OK, but
  808. * the header's copy failed, and they are
  809. * sharing a slot, send an error
  810. */
  811. if (i == 0 && sharedslot)
  812. xenvif_idx_release(queue, pending_idx,
  813. XEN_NETIF_RSP_ERROR);
  814. else
  815. xenvif_idx_release(queue, pending_idx,
  816. XEN_NETIF_RSP_OKAY);
  817. }
  818. continue;
  819. }
  820. /* Error on this fragment: respond to client with an error. */
  821. if (net_ratelimit())
  822. netdev_dbg(queue->vif->dev,
  823. "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
  824. i,
  825. gop_map->status,
  826. pending_idx,
  827. gop_map->ref);
  828. xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
  829. /* Not the first error? Preceding frags already invalidated. */
  830. if (err)
  831. continue;
  832. /* First error: if the header hasn't shared a slot with the
  833. * first frag, release it as well.
  834. */
  835. if (!sharedslot)
  836. xenvif_idx_release(queue,
  837. XENVIF_TX_CB(skb)->pending_idx,
  838. XEN_NETIF_RSP_OKAY);
  839. /* Invalidate preceding fragments of this skb. */
  840. for (j = 0; j < i; j++) {
  841. pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
  842. xenvif_idx_unmap(queue, pending_idx);
  843. xenvif_idx_release(queue, pending_idx,
  844. XEN_NETIF_RSP_OKAY);
  845. }
  846. /* And if we found the error while checking the frag_list, unmap
  847. * the first skb's frags
  848. */
  849. if (first_shinfo) {
  850. for (j = 0; j < first_shinfo->nr_frags; j++) {
  851. pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
  852. xenvif_idx_unmap(queue, pending_idx);
  853. xenvif_idx_release(queue, pending_idx,
  854. XEN_NETIF_RSP_OKAY);
  855. }
  856. }
  857. /* Remember the error: invalidate all subsequent fragments. */
  858. err = newerr;
  859. }
  860. if (skb_has_frag_list(skb) && !first_shinfo) {
  861. first_shinfo = skb_shinfo(skb);
  862. shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
  863. nr_frags = shinfo->nr_frags;
  864. goto check_frags;
  865. }
  866. *gopp_map = gop_map;
  867. return err;
  868. }
  869. static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
  870. {
  871. struct skb_shared_info *shinfo = skb_shinfo(skb);
  872. int nr_frags = shinfo->nr_frags;
  873. int i;
  874. u16 prev_pending_idx = INVALID_PENDING_IDX;
  875. for (i = 0; i < nr_frags; i++) {
  876. skb_frag_t *frag = shinfo->frags + i;
  877. struct xen_netif_tx_request *txp;
  878. struct page *page;
  879. u16 pending_idx;
  880. pending_idx = frag_get_pending_idx(frag);
  881. /* If this is not the first frag, chain it to the previous. */
  882. if (prev_pending_idx == INVALID_PENDING_IDX)
  883. skb_shinfo(skb)->destructor_arg =
  884. &callback_param(queue, pending_idx);
  885. else
  886. callback_param(queue, prev_pending_idx).ctx =
  887. &callback_param(queue, pending_idx);
  888. callback_param(queue, pending_idx).ctx = NULL;
  889. prev_pending_idx = pending_idx;
  890. txp = &queue->pending_tx_info[pending_idx].req;
  891. page = virt_to_page(idx_to_kaddr(queue, pending_idx));
  892. __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
  893. skb->len += txp->size;
  894. skb->data_len += txp->size;
  895. skb->truesize += txp->size;
  896. /* Take an extra reference to offset the network stack's put_page */
  897. get_page(queue->mmap_pages[pending_idx]);
  898. }
  899. }
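/* Besides filling the frag array, the loop above threads the per-slot
 * callback_structs together through their ctx pointers, so that
 * xenvif_zerocopy_callback() can walk every pending_idx belonging to this skb
 * starting from the single destructor_arg set on the first one.
 */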
  900. static int xenvif_get_extras(struct xenvif_queue *queue,
  901. struct xen_netif_extra_info *extras,
  902. int work_to_do)
  903. {
  904. struct xen_netif_extra_info extra;
  905. RING_IDX cons = queue->tx.req_cons;
  906. do {
  907. if (unlikely(work_to_do-- <= 0)) {
  908. netdev_err(queue->vif->dev, "Missing extra info\n");
  909. xenvif_fatal_tx_err(queue->vif);
  910. return -EBADR;
  911. }
  912. memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
  913. sizeof(extra));
  914. if (unlikely(!extra.type ||
  915. extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
  916. queue->tx.req_cons = ++cons;
  917. netdev_err(queue->vif->dev,
  918. "Invalid extra type: %d\n", extra.type);
  919. xenvif_fatal_tx_err(queue->vif);
  920. return -EINVAL;
  921. }
  922. memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
  923. queue->tx.req_cons = ++cons;
  924. } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
  925. return work_to_do;
  926. }
  927. static int xenvif_set_skb_gso(struct xenvif *vif,
  928. struct sk_buff *skb,
  929. struct xen_netif_extra_info *gso)
  930. {
  931. if (!gso->u.gso.size) {
  932. netdev_err(vif->dev, "GSO size must not be zero.\n");
  933. xenvif_fatal_tx_err(vif);
  934. return -EINVAL;
  935. }
  936. switch (gso->u.gso.type) {
  937. case XEN_NETIF_GSO_TYPE_TCPV4:
  938. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
  939. break;
  940. case XEN_NETIF_GSO_TYPE_TCPV6:
  941. skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
  942. break;
  943. default:
  944. netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
  945. xenvif_fatal_tx_err(vif);
  946. return -EINVAL;
  947. }
  948. skb_shinfo(skb)->gso_size = gso->u.gso.size;
  949. /* gso_segs will be calculated later */
  950. return 0;
  951. }
  952. static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
  953. {
  954. bool recalculate_partial_csum = false;
  955. /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
  956. * peers can fail to set NETRXF_csum_blank when sending a GSO
  957. * frame. In this case force the SKB to CHECKSUM_PARTIAL and
  958. * recalculate the partial checksum.
  959. */
  960. if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
  961. queue->stats.rx_gso_checksum_fixup++;
  962. skb->ip_summed = CHECKSUM_PARTIAL;
  963. recalculate_partial_csum = true;
  964. }
  965. /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
  966. if (skb->ip_summed != CHECKSUM_PARTIAL)
  967. return 0;
  968. return skb_checksum_setup(skb, recalculate_partial_csum);
  969. }
  970. static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
  971. {
  972. u64 now = get_jiffies_64();
  973. u64 next_credit = queue->credit_window_start +
  974. msecs_to_jiffies(queue->credit_usec / 1000);
  975. /* Timer could already be pending in rare cases. */
  976. if (timer_pending(&queue->credit_timeout))
  977. return true;
  978. /* Passed the point where we can replenish credit? */
  979. if (time_after_eq64(now, next_credit)) {
  980. queue->credit_window_start = now;
  981. tx_add_credit(queue);
  982. }
  983. /* Still too big to send right now? Set a callback. */
  984. if (size > queue->remaining_credit) {
  985. queue->credit_timeout.data =
  986. (unsigned long)queue;
  987. mod_timer(&queue->credit_timeout,
  988. next_credit);
  989. queue->credit_window_start = next_credit;
  990. return true;
  991. }
  992. return false;
  993. }
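/* Illustrative example (the actual values come from the toolstack's rate
 * limit configuration for this vif): with credit_bytes = 125000 and
 * credit_usec = 100000 the queue may transmit 125000 bytes per 100 ms window,
 * i.e. roughly 10 Mbit/s. A request that exceeds the remaining credit is not
 * dropped; it is left on the ring and credit_timeout is armed for the start
 * of the next window.
 */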
  994. /* No locking is required in xenvif_mcast_add/del() as they are
  995. * only ever invoked from NAPI poll. An RCU list is used because
  996. * xenvif_mcast_match() is called asynchronously, during start_xmit.
  997. */
  998. static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
  999. {
  1000. struct xenvif_mcast_addr *mcast;
  1001. if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
  1002. if (net_ratelimit())
  1003. netdev_err(vif->dev,
  1004. "Too many multicast addresses\n");
  1005. return -ENOSPC;
  1006. }
  1007. mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
  1008. if (!mcast)
  1009. return -ENOMEM;
  1010. ether_addr_copy(mcast->addr, addr);
  1011. list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
  1012. vif->fe_mcast_count++;
  1013. return 0;
  1014. }
  1015. static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
  1016. {
  1017. struct xenvif_mcast_addr *mcast;
  1018. list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
  1019. if (ether_addr_equal(addr, mcast->addr)) {
  1020. --vif->fe_mcast_count;
  1021. list_del_rcu(&mcast->entry);
  1022. kfree_rcu(mcast, rcu);
  1023. break;
  1024. }
  1025. }
  1026. }
  1027. bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
  1028. {
  1029. struct xenvif_mcast_addr *mcast;
  1030. rcu_read_lock();
  1031. list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
  1032. if (ether_addr_equal(addr, mcast->addr)) {
  1033. rcu_read_unlock();
  1034. return true;
  1035. }
  1036. }
  1037. rcu_read_unlock();
  1038. return false;
  1039. }
  1040. void xenvif_mcast_addr_list_free(struct xenvif *vif)
  1041. {
  1042. /* No need for locking or RCU here. NAPI poll and TX queue
  1043. * are stopped.
  1044. */
  1045. while (!list_empty(&vif->fe_mcast_addr)) {
  1046. struct xenvif_mcast_addr *mcast;
  1047. mcast = list_first_entry(&vif->fe_mcast_addr,
  1048. struct xenvif_mcast_addr,
  1049. entry);
  1050. --vif->fe_mcast_count;
  1051. list_del(&mcast->entry);
  1052. kfree(mcast);
  1053. }
  1054. }
  1055. static void xenvif_tx_build_gops(struct xenvif_queue *queue,
  1056. int budget,
  1057. unsigned *copy_ops,
  1058. unsigned *map_ops)
  1059. {
  1060. struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
  1061. struct sk_buff *skb, *nskb;
  1062. int ret;
  1063. unsigned int frag_overflow;
  1064. while (skb_queue_len(&queue->tx_queue) < budget) {
  1065. struct xen_netif_tx_request txreq;
  1066. struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
  1067. struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
  1068. u16 pending_idx;
  1069. RING_IDX idx;
  1070. int work_to_do;
  1071. unsigned int data_len;
  1072. pending_ring_idx_t index;
  1073. if (queue->tx.sring->req_prod - queue->tx.req_cons >
  1074. XEN_NETIF_TX_RING_SIZE) {
  1075. netdev_err(queue->vif->dev,
  1076. "Impossible number of requests. "
  1077. "req_prod %d, req_cons %d, size %ld\n",
  1078. queue->tx.sring->req_prod, queue->tx.req_cons,
  1079. XEN_NETIF_TX_RING_SIZE);
  1080. xenvif_fatal_tx_err(queue->vif);
  1081. break;
  1082. }
  1083. work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
  1084. if (!work_to_do)
  1085. break;
  1086. idx = queue->tx.req_cons;
  1087. rmb(); /* Ensure that we see the request before we copy it. */
  1088. memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
  1089. /* Credit-based scheduling. */
  1090. if (txreq.size > queue->remaining_credit &&
  1091. tx_credit_exceeded(queue, txreq.size))
  1092. break;
  1093. queue->remaining_credit -= txreq.size;
  1094. work_to_do--;
  1095. queue->tx.req_cons = ++idx;
  1096. memset(extras, 0, sizeof(extras));
  1097. if (txreq.flags & XEN_NETTXF_extra_info) {
  1098. work_to_do = xenvif_get_extras(queue, extras,
  1099. work_to_do);
  1100. idx = queue->tx.req_cons;
  1101. if (unlikely(work_to_do < 0))
  1102. break;
  1103. }
  1104. if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
  1105. struct xen_netif_extra_info *extra;
  1106. extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
  1107. ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
  1108. make_tx_response(queue, &txreq,
  1109. (ret == 0) ?
  1110. XEN_NETIF_RSP_OKAY :
  1111. XEN_NETIF_RSP_ERROR);
  1112. push_tx_responses(queue);
  1113. continue;
  1114. }
  1115. if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
  1116. struct xen_netif_extra_info *extra;
  1117. extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
  1118. xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
  1119. make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
  1120. push_tx_responses(queue);
  1121. continue;
  1122. }
  1123. ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
  1124. if (unlikely(ret < 0))
  1125. break;
  1126. idx += ret;
  1127. if (unlikely(txreq.size < ETH_HLEN)) {
  1128. netdev_dbg(queue->vif->dev,
  1129. "Bad packet size: %d\n", txreq.size);
  1130. xenvif_tx_err(queue, &txreq, idx);
  1131. break;
  1132. }
  1133. /* The payload must not cross a page boundary, as it must not be fragmented. */
  1134. if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
  1135. netdev_err(queue->vif->dev,
  1136. "txreq.offset: %u, size: %u, end: %lu\n",
  1137. txreq.offset, txreq.size,
  1138. (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
  1139. xenvif_fatal_tx_err(queue->vif);
  1140. break;
  1141. }
  1142. index = pending_index(queue->pending_cons);
  1143. pending_idx = queue->pending_ring[index];
  1144. data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
  1145. ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
  1146. XEN_NETBACK_TX_COPY_LEN : txreq.size;
  1147. skb = xenvif_alloc_skb(data_len);
  1148. if (unlikely(skb == NULL)) {
  1149. netdev_dbg(queue->vif->dev,
  1150. "Can't allocate a skb in start_xmit.\n");
  1151. xenvif_tx_err(queue, &txreq, idx);
  1152. break;
  1153. }
  1154. skb_shinfo(skb)->nr_frags = ret;
  1155. if (data_len < txreq.size)
  1156. skb_shinfo(skb)->nr_frags++;
  1157. /* At this point shinfo->nr_frags is in fact the number of
  1158. * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
  1159. */
  1160. frag_overflow = 0;
  1161. nskb = NULL;
  1162. if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
  1163. frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
  1164. BUG_ON(frag_overflow > MAX_SKB_FRAGS);
  1165. skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
  1166. nskb = xenvif_alloc_skb(0);
  1167. if (unlikely(nskb == NULL)) {
  1168. kfree_skb(skb);
  1169. xenvif_tx_err(queue, &txreq, idx);
  1170. if (net_ratelimit())
  1171. netdev_err(queue->vif->dev,
  1172. "Can't allocate the frag_list skb.\n");
  1173. break;
  1174. }
  1175. }
  1176. if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
  1177. struct xen_netif_extra_info *gso;
  1178. gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
  1179. if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
  1180. /* Failure in xenvif_set_skb_gso is fatal. */
  1181. kfree_skb(skb);
  1182. kfree_skb(nskb);
  1183. break;
  1184. }
  1185. }
  1186. XENVIF_TX_CB(skb)->pending_idx = pending_idx;
  1187. __skb_put(skb, data_len);
  1188. queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
  1189. queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
  1190. queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
  1191. queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
  1192. virt_to_gfn(skb->data);
  1193. queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
  1194. queue->tx_copy_ops[*copy_ops].dest.offset =
  1195. offset_in_page(skb->data) & ~XEN_PAGE_MASK;
  1196. queue->tx_copy_ops[*copy_ops].len = data_len;
  1197. queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
  1198. (*copy_ops)++;
  1199. if (data_len < txreq.size) {
  1200. frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
  1201. pending_idx);
  1202. xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
  1203. gop++;
  1204. } else {
  1205. frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
  1206. INVALID_PENDING_IDX);
  1207. memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
  1208. sizeof(txreq));
  1209. }
  1210. queue->pending_cons++;
  1211. gop = xenvif_get_requests(queue, skb, txfrags, gop,
  1212. frag_overflow, nskb);
  1213. __skb_queue_tail(&queue->tx_queue, skb);
  1214. queue->tx.req_cons = idx;
  1215. if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
  1216. (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
  1217. break;
  1218. }
  1219. (*map_ops) = gop - queue->tx_map_ops;
  1220. return;
  1221. }
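/* xenvif_tx_build_gops() only prepares work: header copies accumulate in
 * queue->tx_copy_ops and frag maps in queue->tx_map_ops. The caller
 * (xenvif_tx_action(), outside this excerpt) is expected to hand both batches
 * to the hypervisor before xenvif_tx_submit() below inspects their status
 * fields.
 */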
  1222. /* Consolidate an skb with a frag_list into a brand new one with local pages
  1223. * on its frags. Returns 0, or -ENOMEM if new pages cannot be allocated.
  1224. */
  1225. static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
  1226. {
  1227. unsigned int offset = skb_headlen(skb);
  1228. skb_frag_t frags[MAX_SKB_FRAGS];
  1229. int i, f;
  1230. struct ubuf_info *uarg;
  1231. struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
  1232. queue->stats.tx_zerocopy_sent += 2;
  1233. queue->stats.tx_frag_overflow++;
  1234. xenvif_fill_frags(queue, nskb);
  1235. /* Subtract the frags' size; we will correct it later */
  1236. skb->truesize -= skb->data_len;
  1237. skb->len += nskb->len;
  1238. skb->data_len += nskb->len;
  1239. /* create a brand new frags array and coalesce there */
  1240. for (i = 0; offset < skb->len; i++) {
  1241. struct page *page;
  1242. unsigned int len;
  1243. BUG_ON(i >= MAX_SKB_FRAGS);
  1244. page = alloc_page(GFP_ATOMIC);
  1245. if (!page) {
  1246. int j;
  1247. skb->truesize += skb->data_len;
  1248. for (j = 0; j < i; j++)
  1249. put_page(frags[j].page.p);
  1250. return -ENOMEM;
  1251. }
  1252. if (offset + PAGE_SIZE < skb->len)
  1253. len = PAGE_SIZE;
  1254. else
  1255. len = skb->len - offset;
  1256. if (skb_copy_bits(skb, offset, page_address(page), len))
  1257. BUG();
  1258. offset += len;
  1259. frags[i].page.p = page;
  1260. frags[i].page_offset = 0;
  1261. skb_frag_size_set(&frags[i], len);
  1262. }
  1263. /* Copied all the bits from the frag list -- free it. */
  1264. skb_frag_list_init(skb);
  1265. xenvif_skb_zerocopy_prepare(queue, nskb);
  1266. kfree_skb(nskb);
  1267. /* Release all the original (foreign) frags. */
  1268. for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
  1269. skb_frag_unref(skb, f);
  1270. uarg = skb_shinfo(skb)->destructor_arg;
  1271. /* increase inflight counter to offset decrement in callback */
  1272. atomic_inc(&queue->inflight_packets);
  1273. uarg->callback(uarg, true);
  1274. skb_shinfo(skb)->destructor_arg = NULL;
  1275. /* Fill the skb with the new (local) frags. */
  1276. memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
  1277. skb_shinfo(skb)->nr_frags = i;
  1278. skb->truesize += i * PAGE_SIZE;
  1279. return 0;
  1280. }
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
		}

		skb->dev      = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}
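
/* Destructor callback for zerocopy (grant-mapped) skbs.  Invoked by the
 * network stack once it is done with the foreign pages; it walks the chained
 * ubuf_info contexts and queues each pending index on the dealloc ring so
 * the dealloc thread can unmap the corresponding grants.
 */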
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;

		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}
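
/* Consumer side of the dealloc ring.  It pairs with xenvif_zerocopy_callback()
 * above using the usual producer/consumer ordering:
 *
 *   producer (callback)             consumer (this function)
 *     dealloc_ring[idx] = i;          dp = dealloc_prod;
 *     smp_wmb();                      smp_rmb();
 *     dealloc_prod++;                 ... read dealloc_ring[] up to dp ...
 *
 * Entries drained from the ring are turned into one batched
 * gnttab_unmap_refs() call, after which the pending slots are handed back
 * to the frontend via xenvif_idx_release().
 */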
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;

		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}

/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}
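
/* Return a pending slot to the pending ring and queue a Tx response for the
 * frontend.  Callers include both the TX completion path and the dealloc
 * thread, hence the response_lock around the ring update.
 */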
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}
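
/* Write a single Tx response at rsp_prod_pvt; if the request carried extra
 * info, an additional XEN_NETIF_RSP_NULL slot is emitted for the extra.  The
 * response only becomes visible to the frontend when push_tx_responses()
 * runs.
 */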
static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     s8 st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	if (txp->flags & XEN_NETTXF_extra_info)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}
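
/* Publish any queued Tx responses to the shared ring and notify the frontend
 * over the Tx event channel if it needs a kick.
 */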
static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}
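
/* Fill in one Rx response slot.  The status field carries the payload size
 * on success, or the (negative) error code passed in 'st' on failure.
 */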
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags)
{
	RING_IDX i = queue->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&queue->rx, i);
	resp->offset = offset;
	resp->flags  = flags;
	resp->id     = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	queue->rx.rsp_prod_pvt = ++i;

	return resp;
}
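
/* Unmap the grant backing a single pending slot.  Unlike the batched path in
 * xenvif_tx_dealloc_action(), this unmaps one entry synchronously; it is
 * typically used from error/cleanup paths elsewhere in this file.
 */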
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}
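
/* Map the Tx and Rx shared rings granted by the frontend into the backend's
 * address space and initialise the back-ring state.  On failure, any ring
 * that was already mapped is torn down again via
 * xenvif_unmap_frontend_rings().
 */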
int xenvif_map_frontend_rings(struct xenvif_queue *queue,
			      grant_ref_t tx_ring_ref,
			      grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_rings(queue);
	return err;
}
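
/* Per-queue guest Rx stall accounting: the carrier is dropped as soon as one
 * queue stalls and is only restored once every stalled queue has become
 * ready again (vif->stalled_queues tracks the count under vif->lock).
 */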
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled && prod - cons < 1
		&& time_after(jiffies,
			      queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled && prod - cons >= 1;
}

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return (!skb_queue_empty(&queue->rx_queue)
		&& xenvif_rx_ring_slots_available(queue))
		|| (queue->vif->stall_timeout &&
		    (xenvif_rx_queue_stalled(queue)
		     || xenvif_rx_queue_ready(queue)))
		|| kthread_should_stop()
		|| queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}
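
/* Per-queue kernel thread that pushes queued packets into the guest's Rx
 * ring.  It also handles stall detection, disabling of rogue frontends
 * (queue 0 only) and dropping of expired packets that still hold foreign
 * pages.
 */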
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context.  Currently this is only set when
		 * netback finds out the frontend sent a malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains.  These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		xenvif_rx_queue_maybe_wake(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}
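
/* Per-queue kernel thread that performs the grant unmaps queued by
 * xenvif_zerocopy_callback().  It must outlive all inflight zerocopy
 * packets, hence the should_stop() helper above.
 */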
int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}
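
/* Module init: default the number of queues to the number of online CPUs,
 * sanity-check fatal_skb_slots, register the xenbus backend and, when
 * CONFIG_DEBUG_FS is enabled, create the debugfs directory.
 */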
static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs if user has not
	 * specified a value.
	 */
	if (xenvif_max_queues == 0)
		xenvif_max_queues = num_online_cpus();

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		pr_warn("Init of debugfs returned %ld!\n",
			PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
		debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");