swiotlb-xen.c

/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 *  This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License v2.0 as published by
 *  the Free Software Foundation
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * PV guests under Xen are running in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) to virtual and vice-versa and also providing a
 * mechanism to have contiguous pages for device drivers' operations (say DMA
 * operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */

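/*
 * Illustrative example (made-up numbers, not taken from a real mapping):
 * pfn_to_bfn(0x1000) might return 0x80321 while pfn_to_bfn(0x1001) returns
 * 0x2f00d, so two pages that are adjacent in the guest's pseudo-physical
 * address space can land anywhere, and in any order, in machine memory.
 */
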
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <asm/dma-mapping.h>
#include <asm/xen/page-coherent.h>

#include <trace/events/swiotlb.h>

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

#ifndef CONFIG_X86
static unsigned long dma_alloc_coherent_mask(struct device *dev,
                                             gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

        return dma_mask;
}
#endif

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static u64 start_dma_addr;

/*
 * Both of these functions should avoid PFN_PHYS because phys_addr_t
 * can be 32 bits wide when dma_addr_t is 64 bits, leading to a loss of
 * information if the shift is done before the cast to 64 bits.
 */
static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
        unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));
        dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT;

        dma |= paddr & ~PAGE_MASK;

        return dma;
}

static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
        unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr));
        dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
        phys_addr_t paddr = dma;

        paddr |= baddr & ~PAGE_MASK;

        return paddr;
}

static inline dma_addr_t xen_virt_to_bus(void *address)
{
        return xen_phys_to_bus(virt_to_phys(address));
}

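/*
 * Why the explicit 64-bit cast above matters (illustrative, assuming
 * PAGE_SHIFT == 12): with a 32-bit phys_addr_t, a frame number of 0x100000
 * (the frame sitting at the 4GB boundary) shifted left by 12 would wrap to 0
 * if the shift were evaluated in 32 bits.  Doing the shift in the 64-bit
 * dma_addr_t, as xen_phys_to_bus() does, yields the expected 0x100000000.
 */
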
static int check_pages_physically_contiguous(unsigned long pfn,
                                             unsigned int offset,
                                             size_t length)
{
        unsigned long next_bfn;
        int i;
        int nr_pages;

        next_bfn = pfn_to_bfn(pfn);
        nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;

        for (i = 1; i < nr_pages; i++) {
                if (pfn_to_bfn(++pfn) != ++next_bfn)
                        return 0;
        }
        return 1;
}

static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
        unsigned long pfn = PFN_DOWN(p);
        unsigned int offset = p & ~PAGE_MASK;

        if (offset + size <= PAGE_SIZE)
                return 0;
        if (check_pages_physically_contiguous(pfn, offset, size))
                return 0;
        return 1;
}

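/*
 * Example (illustrative, assuming 4K pages): a 512-byte buffer starting at
 * page offset 0xf00 spills 256 bytes into the next pseudo-physical page.
 * It only needs bounce buffering if the two pages are not adjacent in
 * machine memory, i.e. if pfn_to_bfn(pfn + 1) != pfn_to_bfn(pfn) + 1.
 */
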
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
        unsigned long bfn = PFN_DOWN(dma_addr);
        unsigned long pfn = bfn_to_local_pfn(bfn);
        phys_addr_t paddr;

        /* If the address is outside our domain, it CAN
         * have the same virtual address as another address
         * in our domain. Therefore _only_ check addresses within our domain.
         */
        if (pfn_valid(pfn)) {
                paddr = PFN_PHYS(pfn);
                return paddr >= virt_to_phys(xen_io_tlb_start) &&
                       paddr < virt_to_phys(xen_io_tlb_end);
        }
        return 0;
}

static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
        int i, rc;
        int dma_bits;
        dma_addr_t dma_handle;
        phys_addr_t p = virt_to_phys(buf);

        dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

        i = 0;
        do {
                int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

                do {
                        rc = xen_create_contiguous_region(
                                p + (i << IO_TLB_SHIFT),
                                get_order(slabs << IO_TLB_SHIFT),
                                dma_bits, &dma_handle);
                } while (rc && dma_bits++ < max_dma_bits);
                if (rc)
                        return rc;

                i += slabs;
        } while (i < nslabs);
        return 0;
}

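/*
 * Sizing sketch for the loop above (illustrative, assuming 4K pages,
 * IO_TLB_SHIFT == 11 and IO_TLB_SEGSIZE == 128): each chunk handed to
 * xen_create_contiguous_region() is 128 << 11 = 256KB, i.e. order 6, so
 * dma_bits starts at 6 + 12 = 18.  Each time the hypervisor cannot satisfy
 * a request, the address-width constraint is relaxed by one bit, up to
 * max_dma_bits (32).
 */
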
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
        if (!nr_tbl) {
                xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
                xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
        } else
                xen_io_tlb_nslabs = nr_tbl;

        return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
        XEN_SWIOTLB_UNKNOWN = 0,
        XEN_SWIOTLB_ENOMEM,
        XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
        switch (err) {
        case XEN_SWIOTLB_ENOMEM:
                return "Cannot allocate Xen-SWIOTLB buffer\n";
        case XEN_SWIOTLB_EFIXUP:
                return "Failed to get contiguous memory for DMA from Xen!\n"
                       "You either: don't have the permissions, do not have"
                       " enough free memory under 4GB, or the hypervisor memory"
                       " is too fragmented!";
        default:
                break;
        }
        return "";
}

int __ref xen_swiotlb_init(int verbose, bool early)
{
        unsigned long bytes, order;
        int rc = -ENOMEM;
        enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
        unsigned int repeat = 3;

        xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
        bytes = xen_set_nslabs(xen_io_tlb_nslabs);
        order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
        /*
         * Get IO TLB memory from any location.
         */
        if (early)
                xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
        else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
                while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
                        xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
                        if (xen_io_tlb_start)
                                break;
                        order--;
                }
                if (order != get_order(bytes)) {
                        pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
                                (PAGE_SIZE << order) >> 20);
                        xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
                        bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
                }
        }
        if (!xen_io_tlb_start) {
                m_ret = XEN_SWIOTLB_ENOMEM;
                goto error;
        }
        xen_io_tlb_end = xen_io_tlb_start + bytes;
        /*
         * And replace that memory with pages under 4GB.
         */
        rc = xen_swiotlb_fixup(xen_io_tlb_start,
                               bytes,
                               xen_io_tlb_nslabs);
        if (rc) {
                if (early)
                        free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
                else {
                        free_pages((unsigned long)xen_io_tlb_start, order);
                        xen_io_tlb_start = NULL;
                }
                m_ret = XEN_SWIOTLB_EFIXUP;
                goto error;
        }
        start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
        if (early) {
                if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
                                          verbose))
                        panic("Cannot allocate SWIOTLB buffer");
                rc = 0;
        } else
                rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
        return rc;
error:
        if (repeat--) {
                xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
                                        (xen_io_tlb_nslabs >> 1));
                pr_info("Lowering to %luMB\n",
                        (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
                goto retry;
        }
        pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
        if (early)
                panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
        else
                free_pages((unsigned long)xen_io_tlb_start, order);
        return rc;
}

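/*
 * Sizing example for xen_swiotlb_init() (illustrative, assuming
 * IO_TLB_SHIFT == 11): the 64MB default corresponds to 32768 slabs of 2KB.
 * If the fixup fails, the error path above halves the slab count and retries
 * up to three times, shrinking the buffer 64MB -> 32MB -> 16MB -> 8MB before
 * giving up, and never dropping below the 2MB floor of 1024 slabs.
 */
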
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        void *ret;
        int order = get_order(size);
        u64 dma_mask = DMA_BIT_MASK(32);
        phys_addr_t phys;
        dma_addr_t dev_addr;

        /*
         * Ignore region specifiers - the kernel's idea of the
         * pseudo-phys memory layout has nothing to do with the
         * machine physical layout.  We can't allocate highmem
         * because we can't return a pointer to it.
         */
        flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

        /* On ARM this function returns an ioremap'ped virtual address for
         * which virt_to_phys doesn't return the corresponding physical
         * address. In fact on ARM virt_to_phys only works for kernel direct
         * mapped RAM. Also see the comment below.
         */
        ret = xen_alloc_coherent_pages(hwdev, size, dma_handle, flags, attrs);

        if (!ret)
                return ret;

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = dma_alloc_coherent_mask(hwdev, flags);

        /* At this point dma_handle is the physical address; next we are
         * going to set it to the machine address.
         * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
         * to *dma_handle. */
        phys = *dma_handle;
        dev_addr = xen_phys_to_bus(phys);
        if ((dev_addr + size - 1 <= dma_mask) &&
            !range_straddles_page_boundary(phys, size))
                *dma_handle = dev_addr;
        else {
                if (xen_create_contiguous_region(phys, order,
                                                 fls64(dma_mask), dma_handle) != 0) {
                        xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
                        return NULL;
                }
        }
        memset(ret, 0, size);
        return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
                          dma_addr_t dev_addr, struct dma_attrs *attrs)
{
        int order = get_order(size);
        phys_addr_t phys;
        u64 dma_mask = DMA_BIT_MASK(32);

        if (hwdev && hwdev->coherent_dma_mask)
                dma_mask = hwdev->coherent_dma_mask;

        /* Do not use virt_to_phys because on ARM it doesn't return the
         * physical address. */
        phys = xen_bus_to_phys(dev_addr);

        if ((dev_addr + size - 1 > dma_mask) ||
            range_straddles_page_boundary(phys, size))
                xen_destroy_contiguous_region(phys, order);

        xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * DMA address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
                                unsigned long offset, size_t size,
                                enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        phys_addr_t map, phys = page_to_phys(page) + offset;
        dma_addr_t dev_addr = xen_phys_to_bus(phys);

        BUG_ON(dir == DMA_NONE);
        /*
         * If the address happens to be in the device's DMA window,
         * we can safely return the device addr and not worry about bounce
         * buffering it.
         */
        if (dma_capable(dev, dev_addr, size) &&
            !range_straddles_page_boundary(phys, size) &&
            !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
            !swiotlb_force) {
                /* we are not interested in the dma_addr returned by
                 * xen_dma_map_page, only in the potential cache flushes
                 * executed by the function. */
                xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
                return dev_addr;
        }

        /*
         * Oh well, have to allocate and map a bounce buffer.
         */
        trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

        map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
        if (map == SWIOTLB_MAP_ERROR)
                return DMA_ERROR_CODE;

        xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
                         dev_addr, map & ~PAGE_MASK, size, dir, attrs);
        dev_addr = xen_phys_to_bus(map);

        /*
         * Ensure that the address returned is DMA'ble.
         */
        if (!dma_capable(dev, dev_addr, size)) {
                swiotlb_tbl_unmap_single(dev, map, size, dir);
                dev_addr = 0;
        }
        return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

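/*
 * Minimal usage sketch (hypothetical driver code; in practice this entry
 * point is reached through the device's dma_map_ops rather than being
 * called directly):
 *
 *      dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ...program the device with "handle"...
 *      dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
 */
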
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
                             size_t size, enum dma_data_direction dir,
                             struct dma_attrs *attrs)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
                swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
                return;
        }

        if (dir != DMA_FROM_DEVICE)
                return;

        /*
         * phys_to_virt doesn't work with highmem pages, but we could be
         * calling dma_mark_clean() with a highmem page here. However, we
         * are fine since dma_mark_clean() is null on POWERPC. We can
         * make dma_mark_clean() take a physical address if necessary.
         */
        dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
                            size_t size, enum dma_data_direction dir,
                            struct dma_attrs *attrs)
{
        xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
                        size_t size, enum dma_data_direction dir,
                        enum dma_sync_target target)
{
        phys_addr_t paddr = xen_bus_to_phys(dev_addr);

        BUG_ON(dir == DMA_NONE);

        if (target == SYNC_FOR_CPU)
                xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);

        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr))
                swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);

        if (target == SYNC_FOR_DEVICE)
                xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);

        if (dir != DMA_FROM_DEVICE)
                return;

        dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
                                size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                   size_t size, enum dma_data_direction dir)
{
        xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements
 *       (for example via virtual mapping capabilities).
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                         int nelems, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i) {
                phys_addr_t paddr = sg_phys(sg);
                dma_addr_t dev_addr = xen_phys_to_bus(paddr);

                if (swiotlb_force ||
                    xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
                    !dma_capable(hwdev, dev_addr, sg->length) ||
                    range_straddles_page_boundary(paddr, sg->length)) {
                        phys_addr_t map = swiotlb_tbl_map_single(hwdev,
                                                                 start_dma_addr,
                                                                 sg_phys(sg),
                                                                 sg->length,
                                                                 dir);
                        if (map == SWIOTLB_MAP_ERROR) {
                                dev_warn(hwdev, "swiotlb buffer is full\n");
                                /* Don't panic here, we expect map_sg users
                                 * to do proper error handling. */
                                xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
                                                           attrs);
                                sg_dma_len(sgl) = 0;
                                return 0;
                        }
                        xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
                                         dev_addr,
                                         map & ~PAGE_MASK,
                                         sg->length,
                                         dir,
                                         attrs);
                        sg->dma_address = xen_phys_to_bus(map);
                } else {
                        /* we are not interested in the dma_addr returned by
                         * xen_dma_map_page, only in the potential cache
                         * flushes executed by the function. */
                        xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
                                         dev_addr,
                                         paddr & ~PAGE_MASK,
                                         sg->length,
                                         dir,
                                         attrs);
                        sg->dma_address = dev_addr;
                }
                sg_dma_len(sg) = sg->length;
        }
        return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);

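/*
 * Usage sketch (hypothetical driver code, normally reached through
 * dma_map_ops): map a scatterlist, then walk the resulting DMA
 * address/length pairs.  program_descriptor() is a made-up helper.
 *
 *      struct scatterlist *sg;
 *      int i, n = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *      if (n == 0)
 *              return -ENOMEM;
 *      for_each_sg(sgl, sg, n, i)
 *              program_descriptor(dev, sg_dma_address(sg), sg_dma_len(sg));
 *      ...run the transfer...
 *      dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */
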
/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
                           int nelems, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(dir == DMA_NONE);

        for_each_sg(sgl, sg, nelems, i)
                xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir, attrs);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
                    int nelems, enum dma_data_direction dir,
                    enum dma_sync_target target)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nelems, i)
                xen_swiotlb_sync_single(hwdev, sg->dma_address,
                                        sg_dma_len(sg), dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                            int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                               int nelems, enum dma_data_direction dir)
{
        xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);

int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);

int
xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !xen_swiotlb_dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);