virtio_ring.c
/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with 0.1 seconds between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
				> 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	else
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, vq->vring.desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;

		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
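
/*
 * Usage sketch (illustrative, not part of this file): a driver typically
 * builds one scatterlist per direction and hands both to
 * virtqueue_add_sgs().  The "req"/"resp" structures below are hypothetical
 * driver-private buffers.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, req, sizeof(*req));		// device-readable
 *	sg_init_one(&status, resp, sizeof(*resp));	// device-writable
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);	// tell the other side, if needed
 */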
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
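
/*
 * Usage sketch (illustrative, with a hypothetical driver lock "vq_lock"):
 * the serialized half runs under the lock, while the notification does
 * not, keeping the expensive exit to the host out of the critical section.
 *
 *	bool kick;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&priv->vq_lock, flags);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->vq_lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */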
/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
		       void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
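
/*
 * Usage sketch (illustrative): drain all completed buffers in one pass,
 * e.g. from a virtqueue callback.  "token" is whatever was passed as
 * @data to virtqueue_add_*(); "complete_request" is a hypothetical
 * driver helper.
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 */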
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
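
/*
 * Usage sketch (illustrative): the prepare/poll pair closes the race
 * between "no more work" and "re-enable interrupts", e.g. in a NAPI-style
 * loop.  "process_used_buffers" is a hypothetical driver helper.
 *
 *	for (;;) {
 *		unsigned opaque;
 *
 *		process_used_buffers(vq);
 *		opaque = virtqueue_enable_cb_prepare(vq);
 *		if (!virtqueue_poll(vq, opaque))
 *			break;			// no race; callbacks stay on
 *		virtqueue_disable_cb(vq);	// lost the race; go around
 *	}
 */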
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
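
/*
 * Usage sketch (illustrative): transmit-completion paths often prefer the
 * delayed variant so the host interrupts once per batch rather than once
 * per buffer.  "reclaim_tx_buffers" is a hypothetical driver helper.
 *
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		reclaim_tx_buffers(priv);	// race lost: reap completions now
 */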
/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i, NULL);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
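
/*
 * Usage sketch (illustrative): on device removal, after the queue has been
 * stopped, return every still-posted buffer to the driver for freeing.
 * This assumes the tokens were kmalloc'ed buffers, which is driver policy.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */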
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num - 1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		/* Only shrink the ring if the caller allowed it. */
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
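
/*
 * Usage sketch (illustrative): a transport asks for a 256-entry,
 * page-aligned ring and lets the core shrink it if the allocation fails.
 * "mmio_notify" and "rx_done" are hypothetical transport callbacks.
 *
 *	vq = vring_create_virtqueue(0, 256, PAGE_SIZE, vdev,
 *				    true,	// weak_barriers
 *				    true,	// may_reduce_num
 *				    false,	// context
 *				    mmio_notify, rx_done, "rx");
 *	if (!vq)
 *		return -ENOMEM;
 */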
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");