netback.c

/*
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 * drivers/net/xen-netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>

#include <net/tcp.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <xen/page.h>

#include <asm/xen/hypercall.h>

/* Provide an option to disable split event channels at load time as
 * event channels are limited resource. Split event channels are
 * enabled by default.
 */
bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);

/* The time that packets can stay on the guest Rx internal queue
 * before they are dropped.
 */
unsigned int rx_drain_timeout_msecs = 10000;
module_param(rx_drain_timeout_msecs, uint, 0444);

/* The length of time before the frontend is considered unresponsive
 * because it isn't providing Rx slots.
 */
unsigned int rx_stall_timeout_msecs = 60000;
module_param(rx_stall_timeout_msecs, uint, 0444);

unsigned int xenvif_max_queues;
module_param_named(max_queues, xenvif_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of queues per virtual interface");

/*
 * This is the maximum slots a skb can have. If a guest sends a skb
 * which exceeds this limit it is considered malicious.
 */
#define FATAL_SKB_SLOTS_DEFAULT 20
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444);

/* The amount to copy out of the first guest Tx slot into the skb's
 * linear area. If the first slot has more data, it will be mapped
 * and put into the first frag.
 *
 * This is sized to avoid pulling headers from the frags for most
 * TCP/IP packets.
 */
#define XEN_NETBACK_TX_COPY_LEN 128

/* This is the maximum number of flows in the hash cache. */
#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");

static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status);

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 st);
static void push_tx_responses(struct xenvif_queue *queue);

static inline int tx_work_todo(struct xenvif_queue *queue);

static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
						      u16 id,
						      s8 st,
						      u16 offset,
						      u16 size,
						      u16 flags);

static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
				       u16 idx)
{
	return page_to_pfn(queue->mmap_pages[idx]);
}

static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
					 u16 idx)
{
	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
}

#define callback_param(vif, pending_idx) \
	(vif->pending_tx_info[pending_idx].callback_struct)

/* Find the containing VIF's structure from a pointer in pending_tx_info array
 */
static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
{
	u16 pending_idx = ubuf->desc;
	struct pending_tx_info *temp =
		container_of(ubuf, struct pending_tx_info, callback_struct);
	return container_of(temp - pending_idx,
			    struct xenvif_queue,
			    pending_tx_info[0]);
}

static u16 frag_get_pending_idx(skb_frag_t *frag)
{
	return (u16)frag->page_offset;
}

static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
	frag->page_offset = pending_idx;
}

static inline pending_ring_idx_t pending_index(unsigned i)
{
	return i & (MAX_PENDING_REQS-1);
}
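
/* Check whether the frontend has posted enough Rx ring slots for the
 * packet at the head of the internal Rx queue: one slot per page of
 * data plus one extra slot if the packet is GSO.
 */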
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;
	struct sk_buff *skb;
	int needed;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return false;

	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
	if (skb_is_gso(skb))
		needed++;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}
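
/* Queue an skb for transmission to the frontend.  If the internal Rx
 * queue grows beyond its limit, stop the corresponding netdev Tx queue
 * until space is freed up again.
 */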
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max)
		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb)
		queue->rx_queue_len -= skb->len;

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
{
	spin_lock_irq(&queue->rx_queue.lock);

	if (queue->rx_queue_len < queue->rx_queue_max)
		netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));

	spin_unlock_irq(&queue->rx_queue.lock);
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for(;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
	}
}
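
/* State used while turning queued skbs into frontend Rx responses: a
 * batch of pending grant-copy operations plus the meta slots that
 * describe the responses to be written back to the ring.
 */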
struct netrx_pending_operations {
	unsigned copy_prod, copy_cons;
	unsigned meta_prod, meta_cons;
	struct gnttab_copy *copy;
	struct xenvif_rx_meta *meta;
	int copy_off;
	grant_ref_t copy_gref;
};

static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
						 struct netrx_pending_operations *npo)
{
	struct xenvif_rx_meta *meta;
	struct xen_netif_rx_request req;

	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);

	meta = npo->meta + npo->meta_prod++;
	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
	meta->gso_size = 0;
	meta->size = 0;
	meta->id = req.id;

	npo->copy_off = 0;
	npo->copy_gref = req.gref;

	return meta;
}

struct gop_frag_copy {
	struct xenvif_queue *queue;
	struct netrx_pending_operations *npo;
	struct xenvif_rx_meta *meta;
	int head;
	int gso_type;
	struct page *page;
};
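
/* Emit one grant-copy operation covering up to *len bytes of the frag,
 * trimming *len to whatever fits in the current frontend buffer and
 * moving on to the next Rx request when the current buffer is full.
 */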
static void xenvif_setup_copy_gop(unsigned long gfn,
				  unsigned int offset,
				  unsigned int *len,
				  struct gop_frag_copy *info)
{
	struct gnttab_copy *copy_gop;
	struct xen_page_foreign *foreign;
	/* Convenient aliases */
	struct xenvif_queue *queue = info->queue;
	struct netrx_pending_operations *npo = info->npo;
	struct page *page = info->page;

	BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);

	if (npo->copy_off == MAX_BUFFER_OFFSET)
		info->meta = get_next_rx_buffer(queue, npo);

	if (npo->copy_off + *len > MAX_BUFFER_OFFSET)
		*len = MAX_BUFFER_OFFSET - npo->copy_off;

	copy_gop = npo->copy + npo->copy_prod++;
	copy_gop->flags = GNTCOPY_dest_gref;
	copy_gop->len = *len;

	foreign = xen_page_foreign(page);
	if (foreign) {
		copy_gop->source.domid = foreign->domid;
		copy_gop->source.u.ref = foreign->gref;
		copy_gop->flags |= GNTCOPY_source_gref;
	} else {
		copy_gop->source.domid = DOMID_SELF;
		copy_gop->source.u.gmfn = gfn;
	}
	copy_gop->source.offset = offset;

	copy_gop->dest.domid = queue->vif->domid;
	copy_gop->dest.offset = npo->copy_off;
	copy_gop->dest.u.ref = npo->copy_gref;

	npo->copy_off += *len;
	info->meta->size += *len;

	/* Leave a gap for the GSO descriptor. */
	if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
		queue->rx.req_cons++;

	info->head = 0; /* There must be something in this buffer now */
}

static void xenvif_gop_frag_copy_grant(unsigned long gfn,
				       unsigned offset,
				       unsigned int len,
				       void *data)
{
	unsigned int bytes;

	while (len) {
		bytes = len;
		xenvif_setup_copy_gop(gfn, offset, &bytes, data);
		offset += bytes;
		len -= bytes;
	}
}

/*
 * Set up the grant operations for this fragment. If it's a flipping
 * interface, we also set up the unmap request from here.
 */
static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
				 struct netrx_pending_operations *npo,
				 struct page *page, unsigned long size,
				 unsigned long offset, int *head)
{
	struct gop_frag_copy info = {
		.queue = queue,
		.npo = npo,
		.head = *head,
		.gso_type = XEN_NETIF_GSO_TYPE_NONE,
	};
	unsigned long bytes;

	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			info.gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}

	/* Data must not cross a page boundary. */
	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));

	info.meta = npo->meta + npo->meta_prod - 1;

	/* Skip unused frames from start of page */
	page += offset >> PAGE_SHIFT;
	offset &= ~PAGE_MASK;

	while (size > 0) {
		BUG_ON(offset >= PAGE_SIZE);

		bytes = PAGE_SIZE - offset;
		if (bytes > size)
			bytes = size;

		info.page = page;
		gnttab_foreach_grant_in_range(page, offset, bytes,
					      xenvif_gop_frag_copy_grant,
					      &info);
		size -= bytes;
		offset = 0;

		/* Next page */
		if (size) {
			BUG_ON(!PageCompound(page));
			page++;
		}
	}

	*head = info.head;
}
/*
 * Prepare an SKB to be transmitted to the frontend.
 *
 * This function is responsible for allocating grant operations, meta
 * structures, etc.
 *
 * It returns the number of meta structures consumed. The number of
 * ring slots used is always equal to the number of meta slots used
 * plus the number of GSO descriptors used. Currently, we use either
 * zero GSO descriptors (for non-GSO packets) or one descriptor (for
 * frontend-side LRO).
 */
static int xenvif_gop_skb(struct sk_buff *skb,
			  struct netrx_pending_operations *npo,
			  struct xenvif_queue *queue)
{
	struct xenvif *vif = netdev_priv(skb->dev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int i;
	struct xen_netif_rx_request req;
	struct xenvif_rx_meta *meta;
	unsigned char *data;
	int head = 1;
	int old_meta_prod;
	int gso_type;

	old_meta_prod = npo->meta_prod;

	gso_type = XEN_NETIF_GSO_TYPE_NONE;
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
	}

	/* Set up a GSO prefix descriptor, if necessary */
	if ((1 << gso_type) & vif->gso_prefix_mask) {
		RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
		meta = npo->meta + npo->meta_prod++;
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
		meta->size = 0;
		meta->id = req.id;
	}

	RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
	meta = npo->meta + npo->meta_prod++;

	if ((1 << gso_type) & vif->gso_mask) {
		meta->gso_type = gso_type;
		meta->gso_size = skb_shinfo(skb)->gso_size;
	} else {
		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
		meta->gso_size = 0;
	}

	meta->size = 0;
	meta->id = req.id;
	npo->copy_off = 0;
	npo->copy_gref = req.gref;

	data = skb->data;
	while (data < skb_tail_pointer(skb)) {
		unsigned int offset = offset_in_page(data);
		unsigned int len = PAGE_SIZE - offset;

		if (data + len > skb_tail_pointer(skb))
			len = skb_tail_pointer(skb) - data;

		xenvif_gop_frag_copy(queue, skb, npo,
				     virt_to_page(data), len, offset, &head);
		data += len;
	}

	for (i = 0; i < nr_frags; i++) {
		xenvif_gop_frag_copy(queue, skb, npo,
				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
				     skb_shinfo(skb)->frags[i].page_offset,
				     &head);
	}

	return npo->meta_prod - old_meta_prod;
}
/*
 * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
 * used to set up the operations on the top of
 * netrx_pending_operations, which have since been done. Check that
 * they didn't give any errors and advance over them.
 */
static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
			    struct netrx_pending_operations *npo)
{
	struct gnttab_copy *copy_op;
	int status = XEN_NETIF_RSP_OKAY;
	int i;

	for (i = 0; i < nr_meta_slots; i++) {
		copy_op = npo->copy + npo->copy_cons++;
		if (copy_op->status != GNTST_okay) {
			netdev_dbg(vif->dev,
				   "Bad status %d from copy to DOM%d.\n",
				   copy_op->status, vif->domid);
			status = XEN_NETIF_RSP_ERROR;
		}
	}

	return status;
}

static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
				      struct xenvif_rx_meta *meta,
				      int nr_meta_slots)
{
	int i;
	unsigned long offset;

	/* No fragments used */
	if (nr_meta_slots <= 1)
		return;

	nr_meta_slots--;

	for (i = 0; i < nr_meta_slots; i++) {
		int flags;
		if (i == nr_meta_slots - 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		offset = 0;
		make_rx_response(queue, meta[i].id, status, offset,
				 meta[i].size, flags);
	}
}

void xenvif_kick_thread(struct xenvif_queue *queue)
{
	wake_up(&queue->wq);
}
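
/* Drain the internal Rx queue as far as the shared ring allows: build
 * the grant-copy batch for each skb, execute the batch, then write the
 * Rx responses (including any GSO extra info) and notify the frontend
 * if needed.
 */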
static void xenvif_rx_action(struct xenvif_queue *queue)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy = queue->grant_copy_op,
		.meta = queue->meta,
	};

	skb_queue_head_init(&rxq);

	while (xenvif_rx_ring_slots_available(queue)
	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
		queue->last_rx_time = jiffies;

		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&queue->rx,
						 queue->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = queue->meta[npo.meta_cons].gso_size;
			resp->id = queue->meta[npo.meta_cons].id;
			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;

			npo.meta_cons++;
			XENVIF_RX_CB(skb)->meta_slots_used--;
		}

		queue->stats.tx_bytes += skb->len;
		queue->stats.tx_packets++;

		status = xenvif_check_gop(queue->vif,
					  XENVIF_RX_CB(skb)->meta_slots_used,
					  &npo);

		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
					status, offset,
					queue->meta[npo.meta_cons].size,
					flags);

		if ((1 << queue->meta[npo.meta_cons].gso_type) &
		    queue->vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&queue->rx,
						  queue->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(queue, status,
					  queue->meta + npo.meta_cons + 1,
					  XENVIF_RX_CB(skb)->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(queue->rx_irq);
}

void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&queue->napi);
}
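
/* Replenish the queue's transmit credit at the start of a new credit
 * window, capping it so a single replenish cannot exceed one burst.
 */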
static void tx_add_credit(struct xenvif_queue *queue)
{
	unsigned long max_burst, max_credit;

	/*
	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
	 * Otherwise the interface can seize up due to insufficient credit.
	 */
	max_burst = max(131072UL, queue->credit_bytes);

	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
	max_credit = queue->remaining_credit + queue->credit_bytes;
	if (max_credit < queue->remaining_credit)
		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */

	queue->remaining_credit = min(max_credit, max_burst);
}

void xenvif_tx_credit_callback(unsigned long data)
{
	struct xenvif_queue *queue = (struct xenvif_queue *)data;
	tx_add_credit(queue);
	xenvif_napi_schedule_or_enable_events(queue);
}

static void xenvif_tx_err(struct xenvif_queue *queue,
			  struct xen_netif_tx_request *txp,
			  unsigned int extra_count, RING_IDX end)
{
	RING_IDX cons = queue->tx.req_cons;
	unsigned long flags;

	do {
		spin_lock_irqsave(&queue->response_lock, flags);
		make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
		push_tx_responses(queue);
		spin_unlock_irqrestore(&queue->response_lock, flags);
		if (cons == end)
			break;
		RING_COPY_REQUEST(&queue->tx, cons++, txp);
		extra_count = 0; /* only the first frag can have extras */
	} while (1);
	queue->tx.req_cons = cons;
}

static void xenvif_fatal_tx_err(struct xenvif *vif)
{
	netdev_err(vif->dev, "fatal error; disabling device\n");
	vif->disabled = true;
	/* Disable the vif from queue 0's kthread */
	if (vif->queues)
		xenvif_kick_thread(&vif->queues[0]);
}
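
/* Walk the chain of tx requests that follows 'first', copying each one
 * into txp[] and validating it.  Returns the number of extra slots the
 * packet occupies, or a negative errno if the frontend misbehaved (in
 * which case the offending requests have already been answered or the
 * vif has been marked fatal).
 */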
static int xenvif_count_requests(struct xenvif_queue *queue,
				 struct xen_netif_tx_request *first,
				 unsigned int extra_count,
				 struct xen_netif_tx_request *txp,
				 int work_to_do)
{
	RING_IDX cons = queue->tx.req_cons;
	int slots = 0;
	int drop_err = 0;
	int more_data;

	if (!(first->flags & XEN_NETTXF_more_data))
		return 0;

	do {
		struct xen_netif_tx_request dropped_tx = { 0 };

		if (slots >= work_to_do) {
			netdev_err(queue->vif->dev,
				   "Asked for %d slots but exceeds this limit\n",
				   work_to_do);
			xenvif_fatal_tx_err(queue->vif);
			return -ENODATA;
		}

		/* This guest is really using too many slots and is
		 * considered malicious.
		 */
		if (unlikely(slots >= fatal_skb_slots)) {
			netdev_err(queue->vif->dev,
				   "Malicious frontend using %d slots, threshold %u\n",
				   slots, fatal_skb_slots);
			xenvif_fatal_tx_err(queue->vif);
			return -E2BIG;
		}

		/* Xen network protocol had implicit dependency on
		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
		 * the historical MAX_SKB_FRAGS value 18 to honor the
		 * same behavior as before. Any packet using more than
		 * 18 slots but less than fatal_skb_slots slots is
		 * dropped.
		 */
		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
			drop_err = -E2BIG;
		}

		if (drop_err)
			txp = &dropped_tx;

		RING_COPY_REQUEST(&queue->tx, cons + slots, txp);

		/* If the guest submitted a frame >= 64 KiB then
		 * first->size overflowed and following slots will
		 * appear to be larger than the frame.
		 *
		 * This cannot be a fatal error as there are buggy
		 * frontends that do this.
		 *
		 * Consume all slots and drop the packet.
		 */
		if (!drop_err && txp->size > first->size) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Invalid tx request, slot size %u > remaining size %u\n",
					   txp->size, first->size);
			drop_err = -EIO;
		}

		first->size -= txp->size;
		slots++;

		if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
				   txp->offset, txp->size);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		more_data = txp->flags & XEN_NETTXF_more_data;

		if (!drop_err)
			txp++;

	} while (more_data);

	if (drop_err) {
		xenvif_tx_err(queue, first, extra_count, cons + slots);
		return drop_err;
	}

	return slots;
}

struct xenvif_tx_cb {
	u16 pending_idx;
};

#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
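
/* Fill in a grant-map operation for one guest tx slot and remember the
 * original request (and its extra count) in pending_tx_info so the
 * response can be generated later.
 */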
static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
					   u16 pending_idx,
					   struct xen_netif_tx_request *txp,
					   unsigned int extra_count,
					   struct gnttab_map_grant_ref *mop)
{
	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
			  GNTMAP_host_map | GNTMAP_readonly,
			  txp->gref, queue->vif->domid);

	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
	       sizeof(*txp));
	queue->pending_tx_info[pending_idx].extra_count = extra_count;
}

static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
{
	struct sk_buff *skb =
		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
			  GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(skb == NULL))
		return NULL;

	/* Packets passed to netif_rx() must have some headroom. */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);

	/* Initialize it here to avoid later surprises */
	skb_shinfo(skb)->destructor_arg = NULL;

	return skb;
}
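
/* Create grant-map operations for all remaining slots of the packet,
 * assigning a pending index to each frag.  Slots that do not fit in the
 * first skb (frag_overflow of them) are placed on the frag_list skb
 * nskb.  Returns the next free slot in the map-op array.
 */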
static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
							struct sk_buff *skb,
							struct xen_netif_tx_request *txp,
							struct gnttab_map_grant_ref *gop,
							unsigned int frag_overflow,
							struct sk_buff *nskb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	skb_frag_t *frags = shinfo->frags;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	int start;
	pending_ring_idx_t index;
	unsigned int nr_slots;

	nr_slots = shinfo->nr_frags;

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
	     shinfo->nr_frags++, txp++, gop++) {
		index = pending_index(queue->pending_cons++);
		pending_idx = queue->pending_ring[index];
		xenvif_tx_create_map_op(queue, pending_idx, txp, 0, gop);
		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
	}

	if (frag_overflow) {
		shinfo = skb_shinfo(nskb);
		frags = shinfo->frags;

		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
		     shinfo->nr_frags++, txp++, gop++) {
			index = pending_index(queue->pending_cons++);
			pending_idx = queue->pending_ring[index];
			xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
						gop);
			frag_set_pending_idx(&frags[shinfo->nr_frags],
					     pending_idx);
		}

		skb_shinfo(skb)->frag_list = nskb;
	}

	return gop;
}

static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
					   u16 pending_idx,
					   grant_handle_t handle)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] !=
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to overwrite active handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = handle;
}

static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
					     u16 pending_idx)
{
	if (unlikely(queue->grant_tx_handle[pending_idx] ==
		     NETBACK_INVALID_HANDLE)) {
		netdev_err(queue->vif->dev,
			   "Trying to unmap invalid handle! pending_idx: 0x%x\n",
			   pending_idx);
		BUG();
	}
	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
}
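
/* Check the result of the header grant-copy and of every frag grant-map
 * for one skb (including its frag_list skb, if any).  On error, the
 * already-mapped frags are unmapped and their slots released, and an
 * error response is sent for the failing slot.  Returns 0 on success or
 * a non-zero error status.
 */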
static int xenvif_tx_check_gop(struct xenvif_queue *queue,
			       struct sk_buff *skb,
			       struct gnttab_map_grant_ref **gopp_map,
			       struct gnttab_copy **gopp_copy)
{
	struct gnttab_map_grant_ref *gop_map = *gopp_map;
	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
	/* This always points to the shinfo of the skb being checked, which
	 * could be either the first or the one on the frag_list
	 */
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	/* If this is non-NULL, we are currently checking the frag_list skb, and
	 * this points to the shinfo of the first one
	 */
	struct skb_shared_info *first_shinfo = NULL;
	int nr_frags = shinfo->nr_frags;
	const bool sharedslot = nr_frags &&
				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
	int i, err;

	/* Check status of header. */
	err = (*gopp_copy)->status;
	if (unlikely(err)) {
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
				   (*gopp_copy)->status,
				   pending_idx,
				   (*gopp_copy)->source.u.ref);
		/* The first frag might still have this slot mapped */
		if (!sharedslot)
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_ERROR);
	}
	(*gopp_copy)++;

check_frags:
	for (i = 0; i < nr_frags; i++, gop_map++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = gop_map->status;

		if (likely(!newerr)) {
			xenvif_grant_handle_set(queue,
						pending_idx,
						gop_map->handle);
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err)) {
				xenvif_idx_unmap(queue, pending_idx);
				/* If the mapping of the first frag was OK, but
				 * the header's copy failed, and they are
				 * sharing a slot, send an error
				 */
				if (i == 0 && sharedslot)
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_ERROR);
				else
					xenvif_idx_release(queue, pending_idx,
							   XEN_NETIF_RSP_OKAY);
			}
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		if (net_ratelimit())
			netdev_dbg(queue->vif->dev,
				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
				   i,
				   gop_map->status,
				   pending_idx,
				   gop_map->ref);
		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: if the header hasn't shared a slot with the
		 * first frag, release it as well.
		 */
		if (!sharedslot)
			xenvif_idx_release(queue,
					   XENVIF_TX_CB(skb)->pending_idx,
					   XEN_NETIF_RSP_OKAY);

		/* Invalidate preceding fragments of this skb. */
		for (j = 0; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xenvif_idx_unmap(queue, pending_idx);
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		/* And if we found the error while checking the frag_list, unmap
		 * the first skb's frags
		 */
		if (first_shinfo) {
			for (j = 0; j < first_shinfo->nr_frags; j++) {
				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
				xenvif_idx_unmap(queue, pending_idx);
				xenvif_idx_release(queue, pending_idx,
						   XEN_NETIF_RSP_OKAY);
			}
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	if (skb_has_frag_list(skb) && !first_shinfo) {
		first_shinfo = skb_shinfo(skb);
		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
		nr_frags = shinfo->nr_frags;

		goto check_frags;
	}

	*gopp_map = gop_map;
	return err;
}
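
/* Replace the pending indices stored in the skb's frags with the real
 * pages that were grant-mapped, fix up the length accounting, and chain
 * the zerocopy callback structures so slots are released when the
 * network stack is done with the pages.
 */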
static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i;
	u16 prev_pending_idx = INVALID_PENDING_IDX;

	for (i = 0; i < nr_frags; i++) {
		skb_frag_t *frag = shinfo->frags + i;
		struct xen_netif_tx_request *txp;
		struct page *page;
		u16 pending_idx;

		pending_idx = frag_get_pending_idx(frag);

		/* If this is not the first frag, chain it to the previous */
		if (prev_pending_idx == INVALID_PENDING_IDX)
			skb_shinfo(skb)->destructor_arg =
				&callback_param(queue, pending_idx);
		else
			callback_param(queue, prev_pending_idx).ctx =
				&callback_param(queue, pending_idx);

		callback_param(queue, pending_idx).ctx = NULL;
		prev_pending_idx = pending_idx;

		txp = &queue->pending_tx_info[pending_idx].req;
		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
		skb->len += txp->size;
		skb->data_len += txp->size;
		skb->truesize += txp->size;

		/* Take an extra reference to offset network stack's put_page */
		get_page(queue->mmap_pages[pending_idx]);
	}
}
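
/* Consume the chain of extra-info slots that follows a tx request,
 * copying each one into extras[] indexed by type.  Returns the amount
 * of work_to_do left, or a negative errno if the chain is malformed.
 */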
static int xenvif_get_extras(struct xenvif_queue *queue,
			     struct xen_netif_extra_info *extras,
			     unsigned int *extra_count,
			     int work_to_do)
{
	struct xen_netif_extra_info extra;
	RING_IDX cons = queue->tx.req_cons;

	do {
		if (unlikely(work_to_do-- <= 0)) {
			netdev_err(queue->vif->dev, "Missing extra info\n");
			xenvif_fatal_tx_err(queue->vif);
			return -EBADR;
		}

		RING_COPY_REQUEST(&queue->tx, cons, &extra);

		queue->tx.req_cons = ++cons;
		(*extra_count)++;

		if (unlikely(!extra.type ||
			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			netdev_err(queue->vif->dev,
				   "Invalid extra type: %d\n", extra.type);
			xenvif_fatal_tx_err(queue->vif);
			return -EINVAL;
		}

		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);

	return work_to_do;
}

static int xenvif_set_skb_gso(struct xenvif *vif,
			      struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		netdev_err(vif->dev, "GSO size must not be zero.\n");
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	switch (gso->u.gso.type) {
	case XEN_NETIF_GSO_TYPE_TCPV4:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		break;
	case XEN_NETIF_GSO_TYPE_TCPV6:
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		break;
	default:
		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
		xenvif_fatal_tx_err(vif);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	/* gso_segs will be calculated later */

	return 0;
}

static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
{
	bool recalculate_partial_csum = false;

	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		queue->stats.rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = true;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	return skb_checksum_setup(skb, recalculate_partial_csum);
}
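
/* Credit-based rate limiting: return true if sending 'size' bytes now
 * would exceed the queue's remaining credit, arming the credit timer so
 * transmission resumes when the next credit window opens.
 */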
static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
{
	u64 now = get_jiffies_64();
	u64 next_credit = queue->credit_window_start +
		msecs_to_jiffies(queue->credit_usec / 1000);

	/* Timer could already be pending in rare cases. */
	if (timer_pending(&queue->credit_timeout))
		return true;

	/* Passed the point where we can replenish credit? */
	if (time_after_eq64(now, next_credit)) {
		queue->credit_window_start = now;
		tx_add_credit(queue);
	}

	/* Still too big to send right now? Set a callback. */
	if (size > queue->remaining_credit) {
		queue->credit_timeout.data =
			(unsigned long)queue;
		mod_timer(&queue->credit_timeout,
			  next_credit);
		queue->credit_window_start = next_credit;

		return true;
	}

	return false;
}

/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Too many multicast addresses\n");
		return -ENOSPC;
	}

	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
	if (!mcast)
		return -ENOMEM;

	ether_addr_copy(mcast->addr, addr);
	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
	vif->fe_mcast_count++;

	return 0;
}

static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			--vif->fe_mcast_count;
			list_del_rcu(&mcast->entry);
			kfree_rcu(mcast, rcu);
			break;
		}
	}
}

bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	rcu_read_lock();
	list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mcast->addr)) {
			rcu_read_unlock();
			return true;
		}
	}
	rcu_read_unlock();

	return false;
}

void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
	/* No need for locking or RCU here. NAPI poll and TX queue
	 * are stopped.
	 */
	while (!list_empty(&vif->fe_mcast_addr)) {
		struct xenvif_mcast_addr *mcast;

		mcast = list_first_entry(&vif->fe_mcast_addr,
					 struct xenvif_mcast_addr,
					 entry);
		--vif->fe_mcast_count;
		list_del(&mcast->entry);
		kfree(mcast);
	}
}
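
/* First half of tx processing: pull requests off the shared tx ring,
 * validate them, and turn them into grant-copy operations (for the
 * linear header area) and grant-map operations (for the frags).  The
 * partially built skbs are queued on tx_queue for xenvif_tx_submit().
 */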
static void xenvif_tx_build_gops(struct xenvif_queue *queue,
				 int budget,
				 unsigned *copy_ops,
				 unsigned *map_ops)
{
	struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
	struct sk_buff *skb, *nskb;
	int ret;
	unsigned int frag_overflow;

	while (skb_queue_len(&queue->tx_queue) < budget) {
		struct xen_netif_tx_request txreq;
		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
		unsigned int extra_count;
		u16 pending_idx;
		RING_IDX idx;
		int work_to_do;
		unsigned int data_len;
		pending_ring_idx_t index;

		if (queue->tx.sring->req_prod - queue->tx.req_cons >
		    XEN_NETIF_TX_RING_SIZE) {
			netdev_err(queue->vif->dev,
				   "Impossible number of requests. "
				   "req_prod %d, req_cons %d, size %ld\n",
				   queue->tx.sring->req_prod, queue->tx.req_cons,
				   XEN_NETIF_TX_RING_SIZE);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
		if (!work_to_do)
			break;

		idx = queue->tx.req_cons;
		rmb(); /* Ensure that we see the request before we copy it. */
		RING_COPY_REQUEST(&queue->tx, idx, &txreq);

		/* Credit-based scheduling. */
		if (txreq.size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq.size))
			break;

		queue->remaining_credit -= txreq.size;

		work_to_do--;
		queue->tx.req_cons = ++idx;

		memset(extras, 0, sizeof(extras));
		extra_count = 0;
		if (txreq.flags & XEN_NETTXF_extra_info) {
			work_to_do = xenvif_get_extras(queue, extras,
						       &extra_count,
						       work_to_do);
			idx = queue->tx.req_cons;
			if (unlikely(work_to_do < 0))
				break;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
			ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 (ret == 0) ?
					 XEN_NETIF_RSP_OKAY :
					 XEN_NETIF_RSP_ERROR);
			push_tx_responses(queue);
			continue;
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
			struct xen_netif_extra_info *extra;

			extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
			xenvif_mcast_del(queue->vif, extra->u.mcast.addr);

			make_tx_response(queue, &txreq, extra_count,
					 XEN_NETIF_RSP_OKAY);
			push_tx_responses(queue);
			continue;
		}

		ret = xenvif_count_requests(queue, &txreq, extra_count,
					    txfrags, work_to_do);
		if (unlikely(ret < 0))
			break;

		idx += ret;

		if (unlikely(txreq.size < ETH_HLEN)) {
			netdev_dbg(queue->vif->dev,
				   "Bad packet size: %d\n", txreq.size);
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		/* No crossing a page as the payload mustn't fragment. */
		if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
			netdev_err(queue->vif->dev,
				   "txreq.offset: %u, size: %u, end: %lu\n",
				   txreq.offset, txreq.size,
				   (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
			xenvif_fatal_tx_err(queue->vif);
			break;
		}

		index = pending_index(queue->pending_cons);
		pending_idx = queue->pending_ring[index];

		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
			XEN_NETBACK_TX_COPY_LEN : txreq.size;

		skb = xenvif_alloc_skb(data_len);
		if (unlikely(skb == NULL)) {
			netdev_dbg(queue->vif->dev,
				   "Can't allocate a skb in start_xmit.\n");
			xenvif_tx_err(queue, &txreq, extra_count, idx);
			break;
		}

		skb_shinfo(skb)->nr_frags = ret;
		if (data_len < txreq.size)
			skb_shinfo(skb)->nr_frags++;
		/* At this point shinfo->nr_frags is in fact the number of
		 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
		 */
		frag_overflow = 0;
		nskb = NULL;
		if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
			frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
			BUG_ON(frag_overflow > MAX_SKB_FRAGS);
			skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
			nskb = xenvif_alloc_skb(0);
			if (unlikely(nskb == NULL)) {
				kfree_skb(skb);
				xenvif_tx_err(queue, &txreq, extra_count, idx);
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Can't allocate the frag_list skb.\n");
				break;
			}
		}

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
				/* Failure in xenvif_set_skb_gso is fatal. */
				kfree_skb(skb);
				kfree_skb(nskb);
				break;
			}
		}

		XENVIF_TX_CB(skb)->pending_idx = pending_idx;

		__skb_put(skb, data_len);
		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;

		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
			virt_to_gfn(skb->data);
		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
		queue->tx_copy_ops[*copy_ops].dest.offset =
			offset_in_page(skb->data) & ~XEN_PAGE_MASK;

		queue->tx_copy_ops[*copy_ops].len = data_len;
		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;

		(*copy_ops)++;

		if (data_len < txreq.size) {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     pending_idx);
			xenvif_tx_create_map_op(queue, pending_idx, &txreq,
						extra_count, gop);
			gop++;
		} else {
			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
					     INVALID_PENDING_IDX);
			memcpy(&queue->pending_tx_info[pending_idx].req,
			       &txreq, sizeof(txreq));
			queue->pending_tx_info[pending_idx].extra_count =
				extra_count;
		}

		queue->pending_cons++;

		gop = xenvif_get_requests(queue, skb, txfrags, gop,
					  frag_overflow, nskb);

		__skb_queue_tail(&queue->tx_queue, skb);

		queue->tx.req_cons = idx;

		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
			break;
	}

	(*map_ops) = gop - queue->tx_map_ops;
	return;
}

/* Consolidate skb with a frag_list into a brand new one with local pages on
 * frags. Returns 0 or -ENOMEM if it can't allocate new pages.
 */
static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned int offset = skb_headlen(skb);
	skb_frag_t frags[MAX_SKB_FRAGS];
	int i, f;
	struct ubuf_info *uarg;
	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;

	queue->stats.tx_zerocopy_sent += 2;
	queue->stats.tx_frag_overflow++;

	xenvif_fill_frags(queue, nskb);
	/* Subtract frags size, we will correct it later */
	skb->truesize -= skb->data_len;
	skb->len += nskb->len;
	skb->data_len += nskb->len;

	/* create a brand new frags array and coalesce there */
	for (i = 0; offset < skb->len; i++) {
		struct page *page;
		unsigned int len;

		BUG_ON(i >= MAX_SKB_FRAGS);
		page = alloc_page(GFP_ATOMIC);
		if (!page) {
			int j;

			skb->truesize += skb->data_len;
			for (j = 0; j < i; j++)
				put_page(frags[j].page.p);
			return -ENOMEM;
		}

		if (offset + PAGE_SIZE < skb->len)
			len = PAGE_SIZE;
		else
			len = skb->len - offset;
		if (skb_copy_bits(skb, offset, page_address(page), len))
			BUG();

		offset += len;
		frags[i].page.p = page;
		frags[i].page_offset = 0;
		skb_frag_size_set(&frags[i], len);
	}

	/* Copied all the bits from the frag list -- free it. */
	skb_frag_list_init(skb);
	xenvif_skb_zerocopy_prepare(queue, nskb);
	kfree_skb(nskb);

	/* Release all the original (foreign) frags. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		skb_frag_unref(skb, f);
	uarg = skb_shinfo(skb)->destructor_arg;
	/* increase inflight counter to offset decrement in callback */
	atomic_inc(&queue->inflight_packets);
	uarg->callback(uarg, true);
	skb_shinfo(skb)->destructor_arg = NULL;

	/* Fill the skb with the new (local) frags. */
	memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
	skb_shinfo(skb)->nr_frags = i;
	skb->truesize += i * PAGE_SIZE;

	return 0;
}
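
/* Take packets whose grant copy/map operations have completed, finish
 * building the skbs (checksum state, frags, GSO metadata) and hand them to
 * the network stack via netif_receive_skb(). Returns the amount of work done.
 */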
static int xenvif_tx_submit(struct xenvif_queue *queue)
{
	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
	struct sk_buff *skb;
	int work_done = 0;

	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
		struct xen_netif_tx_request *txp;
		u16 pending_idx;
		unsigned data_len;

		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
		txp = &queue->pending_tx_info[pending_idx].req;

		/* Check the remap error code. */
		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
			/* If there was an error, xenvif_tx_check_gop is
			 * expected to release all the frags which were mapped,
			 * so kfree_skb shouldn't do it again
			 */
			skb_shinfo(skb)->nr_frags = 0;
			if (skb_has_frag_list(skb)) {
				struct sk_buff *nskb =
						skb_shinfo(skb)->frag_list;
				skb_shinfo(nskb)->nr_frags = 0;
			}
			kfree_skb(skb);
			continue;
		}

		data_len = skb->len;
		callback_param(queue, pending_idx).ctx = NULL;
		if (data_len < txp->size) {
			/* Append the packet payload as a fragment. */
			txp->offset += data_len;
			txp->size -= data_len;
		} else {
			/* Schedule a response immediately. */
			xenvif_idx_release(queue, pending_idx,
					   XEN_NETIF_RSP_OKAY);
		}

		if (txp->flags & XEN_NETTXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (txp->flags & XEN_NETTXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		xenvif_fill_frags(queue, skb);

		if (unlikely(skb_has_frag_list(skb))) {
			if (xenvif_handle_frag_list(queue, skb)) {
				if (net_ratelimit())
					netdev_err(queue->vif->dev,
						   "Not enough memory to consolidate frag_list!\n");
				xenvif_skb_zerocopy_prepare(queue, skb);
				kfree_skb(skb);
				continue;
			}
		}

		skb->dev = queue->vif->dev;
		skb->protocol = eth_type_trans(skb, skb->dev);
		skb_reset_network_header(skb);

		if (checksum_setup(queue, skb)) {
			netdev_dbg(queue->vif->dev,
				   "Can't setup checksum in net_tx_action\n");
			/* We have to set this flag to trigger the callback */
			if (skb_shinfo(skb)->destructor_arg)
				xenvif_skb_zerocopy_prepare(queue, skb);
			kfree_skb(skb);
			continue;
		}

		skb_probe_transport_header(skb, 0);

		/* If the packet is GSO then we will have just set up the
		 * transport header offset in checksum_setup so it's now
		 * straightforward to calculate gso_segs.
		 */
		if (skb_is_gso(skb)) {
			int mss = skb_shinfo(skb)->gso_size;
			int hdrlen = skb_transport_header(skb) -
				skb_mac_header(skb) +
				tcp_hdrlen(skb);

			skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdrlen, mss);
		}

		queue->stats.rx_bytes += skb->len;
		queue->stats.rx_packets++;

		work_done++;

		/* Set this flag right before netif_receive_skb, otherwise
		 * someone might think this packet already left netback, and
		 * do a skb_copy_ubufs while we are still in control of the
		 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
		 */
		if (skb_shinfo(skb)->destructor_arg) {
			xenvif_skb_zerocopy_prepare(queue, skb);
			queue->stats.tx_zerocopy_sent++;
		}

		netif_receive_skb(skb);
	}

	return work_done;
}
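
/* Zerocopy completion callback: invoked once the mapped guest pages backing
 * an skb are no longer in use. Queues the pending indices on the dealloc
 * ring for the dealloc thread and updates the zerocopy success/fail stats.
 */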
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
{
	unsigned long flags;
	pending_ring_idx_t index;
	struct xenvif_queue *queue = ubuf_to_queue(ubuf);

	/* This is the only place where we grab this lock, to protect callbacks
	 * from each other.
	 */
	spin_lock_irqsave(&queue->callback_lock, flags);
	do {
		u16 pending_idx = ubuf->desc;

		ubuf = (struct ubuf_info *) ubuf->ctx;
		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
			MAX_PENDING_REQS);
		index = pending_index(queue->dealloc_prod);
		queue->dealloc_ring[index] = pending_idx;
		/* Sync with xenvif_tx_dealloc_action:
		 * insert idx then incr producer.
		 */
		smp_wmb();
		queue->dealloc_prod++;
	} while (ubuf);
	spin_unlock_irqrestore(&queue->callback_lock, flags);

	if (likely(zerocopy_success))
		queue->stats.tx_zerocopy_success++;
	else
		queue->stats.tx_zerocopy_fail++;
	xenvif_skb_zerocopy_complete(queue);
}
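
/* Drain the dealloc ring: unmap the grants of every completed zerocopy
 * packet and release the corresponding pending slots back to the frontend.
 */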
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback().
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}
	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;

		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}

/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned nr_mops, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		BUG_ON(ret);
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}
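
/* Return a pending slot to the pending ring and send the Tx response for
 * the corresponding request back to the frontend.
 */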
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req,
			 pending_tx_info->extra_count, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}
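
/* Write a Tx response (plus NULL responses for any extra slots) onto the
 * shared ring; the response producer is pushed separately.
 */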
static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8 st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id = txp->id;
	resp->status = st;

	while (extra_count-- != 0)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}
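
/* Publish any queued Tx responses to the frontend and raise an event
 * channel notification if the other end is waiting for one.
 */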
static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}
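
/* Fill in a single Rx response on the shared ring; a negative status from
 * the caller overrides the size field.
 */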
static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
						       u16 id,
						       s8 st,
						       u16 offset,
						       u16 size,
						       u16 flags)
{
	RING_IDX i = queue->rx.rsp_prod_pvt;
	struct xen_netif_rx_response *resp;

	resp = RING_GET_RESPONSE(&queue->rx, i);
	resp->offset = offset;
	resp->flags = flags;
	resp->id = id;
	resp->status = (s16)size;
	if (st < 0)
		resp->status = (s16)st;

	queue->rx.rsp_prod_pvt = ++i;

	return resp;
}
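
/* Synchronously unmap the grant associated with a single pending index and
 * reset its handle; any unmap failure here is fatal.
 */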
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}
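
/* Map the frontend's Tx and Rx shared ring grants into the backend and
 * initialise the corresponding back ring structures.
 */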
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_data_rings(queue);
	return err;
}
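
/* Rx stall handling: take the carrier down when the first queue stalls and
 * bring it back up once every queue has Rx requests again, so that packets
 * for an unresponsive guest are dropped early.
 */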
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}
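
/* A queue is considered stalled when the frontend has provided no Rx ring
 * slots for longer than the configured stall timeout, and ready again as
 * soon as at least one slot is available.
 */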
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled && prod - cons < 1
		&& time_after(jiffies,
			      queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled && prod - cons >= 1;
}

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return xenvif_rx_ring_slots_available(queue)
		|| (queue->vif->stall_timeout &&
		    (xenvif_rx_queue_stalled(queue)
		     || xenvif_rx_queue_ready(queue)))
		|| kthread_should_stop()
		|| queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}
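
/* Per-queue guest Rx kthread: pushes queued packets to the frontend,
 * manages carrier state on Rx stalls and drops packets whose queueing
 * deadline has expired.
 */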
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out the frontend sends a malformed packet,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains. These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		xenvif_rx_queue_maybe_wake(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}
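
/* Per-queue dealloc kthread: unmaps grants for completed zerocopy packets
 * as they are queued by xenvif_zerocopy_callback().
 */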
int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}
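
/* Control ring handling: build and publish responses to the frontend's
 * control requests (hash algorithm, flags, key and mapping configuration).
 */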
static void make_ctrl_response(struct xenvif *vif,
			       const struct xen_netif_ctrl_request *req,
			       u32 status, u32 data)
{
	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
	struct xen_netif_ctrl_response rsp = {
		.id = req->id,
		.type = req->type,
		.status = status,
		.data = data,
	};

	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
	vif->ctrl.rsp_prod_pvt = ++idx;
}

static void push_ctrl_response(struct xenvif *vif)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
	if (notify)
		notify_remote_via_irq(vif->ctrl_irq);
}

static void process_ctrl_request(struct xenvif *vif,
				 const struct xen_netif_ctrl_request *req)
{
	u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
	u32 data = 0;

	switch (req->type) {
	case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
		status = xenvif_set_hash_alg(vif, req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
		status = xenvif_get_hash_flags(vif, &data);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
		status = xenvif_set_hash_flags(vif, req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
		status = xenvif_set_hash_key(vif, req->data[0],
					     req->data[1]);
		break;

	case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
		status = XEN_NETIF_CTRL_STATUS_SUCCESS;
		data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
		status = xenvif_set_hash_mapping_size(vif,
						      req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
		status = xenvif_set_hash_mapping(vif, req->data[0],
						 req->data[1],
						 req->data[2]);
		break;

	default:
		break;
	}

	make_ctrl_response(vif, req, status, data);
	push_ctrl_response(vif);
}

static void xenvif_ctrl_action(struct xenvif *vif)
{
	for (;;) {
		RING_IDX req_prod, req_cons;

		req_prod = vif->ctrl.sring->req_prod;
		req_cons = vif->ctrl.req_cons;

		/* Make sure we can see requests before we process them. */
		rmb();

		if (req_cons == req_prod)
			break;

		while (req_cons != req_prod) {
			struct xen_netif_ctrl_request req;

			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
			req_cons++;

			process_ctrl_request(vif, &req);
		}

		vif->ctrl.req_cons = req_cons;
		vif->ctrl.sring->req_event = req_cons + 1;
	}
}

static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
		return 1;

	return 0;
}
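
/* Control ring kthread: wakes when the frontend posts control requests and
 * processes them until asked to stop.
 */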
int xenvif_ctrl_kthread(void *data)
{
	struct xenvif *vif = data;

	for (;;) {
		wait_event_interruptible(vif->ctrl_wq,
					 xenvif_ctrl_work_todo(vif) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;

		while (xenvif_ctrl_work_todo(vif))
			xenvif_ctrl_action(vif);

		cond_resched();
	}

	return 0;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs if user has not
	 * specified a value.
	 */
	if (xenvif_max_queues == 0)
		xenvif_max_queues = num_online_cpus();

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		pr_warn("Init of debugfs returned %ld!\n",
			PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
		debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */

	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");