dma-mapping.h

#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER (1UL << 0)

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING (1UL << 1)

/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE (1UL << 2)

/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT (1UL << 3)

/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)

/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)

/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)

/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)

/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED (1UL << 9)
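
/*
 * The attributes above are OR'd together into the 'attrs' argument of the
 * *_attrs calls below. A minimal usage sketch; the device pointer 'dev' and
 * the buffer size are assumed purely for illustration, and dma_alloc_attrs()
 * itself is declared further down in this header:
 *
 *        unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN;
 *        dma_addr_t dma_handle;
 *        void *cpu_addr;
 *
 *        cpu_addr = dma_alloc_attrs(dev, SZ_4K, &dma_handle,
 *                                   GFP_KERNEL, attrs);
 *        if (!cpu_addr)
 *                return -ENOMEM;
 *        ...
 *        dma_free_attrs(dev, SZ_4K, cpu_addr, dma_handle, attrs);
 */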

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
        void* (*alloc)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp,
                        unsigned long attrs);
        void (*free)(struct device *dev, size_t size,
                        void *vaddr, dma_addr_t dma_handle,
                        unsigned long attrs);
        int (*mmap)(struct device *, struct vm_area_struct *,
                        void *, dma_addr_t, size_t,
                        unsigned long attrs);
        int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
                        dma_addr_t, size_t, unsigned long attrs);
        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        /*
         * map_sg returns 0 on error and a value > 0 on success.
         * It should never return a value < 0.
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_sg)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir,
                        unsigned long attrs);
        dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev,
                        dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
                        dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir);
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
        int (*dma_supported)(struct device *dev, u64 mask);
        int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
        u64 (*get_required_mask)(struct device *dev);
#endif
        int is_phys;
};
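
/*
 * Architectures and IOMMU drivers provide an implementation of this
 * structure and attach it with set_dma_ops() (declared below when
 * CONFIG_HAS_DMA is enabled). A heavily reduced sketch with only the
 * page-mapping hooks filled in; all 'my_*' names are hypothetical and the
 * trivial 1:1 translation stands in for a real implementation:
 *
 *        static dma_addr_t my_map_page(struct device *dev, struct page *page,
 *                                      unsigned long offset, size_t size,
 *                                      enum dma_data_direction dir,
 *                                      unsigned long attrs)
 *        {
 *                return page_to_phys(page) + offset;
 *        }
 *
 *        static void my_unmap_page(struct device *dev, dma_addr_t dma_handle,
 *                                  size_t size, enum dma_data_direction dir,
 *                                  unsigned long attrs)
 *        {
 *        }
 *
 *        static const struct dma_map_ops my_dma_ops = {
 *                .map_page   = my_map_page,
 *                .unmap_page = my_unmap_page,
 *        };
 *
 *        set_dma_ops(dev, &my_dma_ops);
 */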

extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE 0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
        return ((dma_direction == DMA_BIDIRECTIONAL) ||
                (dma_direction == DMA_TO_DEVICE) ||
                (dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
        return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                            dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (dev && dev->dma_ops)
                return dev->dma_ops;
        return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
                               const struct dma_map_ops *dma_ops)
{
        dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                                              size_t size,
                                              enum dma_data_direction dir,
                                              unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, virt_to_page(ptr),
                             offset_in_page(ptr), size,
                             dir, attrs);
        debug_dma_map_page(dev, virt_to_page(ptr),
                           offset_in_page(ptr), size,
                           dir, addr, true);
        return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}
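
/*
 * A sketch of the usual streaming-mapping pattern built on the wrappers
 * above; 'dev', 'buf' and 'len' are assumed for illustration, and
 * dma_map_single() and dma_mapping_error() are defined later in this file:
 *
 *        dma_addr_t dma;
 *
 *        dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *        if (dma_mapping_error(dev, dma))
 *                return -ENOMEM;
 *        (hand 'dma' to the hardware and wait for the transfer to finish)
 *        dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */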

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                                   int nents, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        int i, ents;
        struct scatterlist *s;

        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                                      int nents, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}
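
/*
 * A sketch of mapping a scatterlist; 'dev', a driver-populated sg_table
 * 'sgt' and the hw_queue_buffer() helper are hypothetical and only for
 * illustration. dma_map_sg() may coalesce entries, so the returned count is
 * iterated instead of sgt->nents, while dma_unmap_sg() still takes the
 * original nents:
 *
 *        struct scatterlist *sg;
 *        int i, nents;
 *
 *        nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 *        if (!nents)
 *                return -EIO;
 *        for_each_sg(sgt->sgl, sg, nents, i)
 *                hw_queue_buffer(hw, sg_dma_address(sg), sg_dma_len(sg));
 *        ...
 *        dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 */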

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                                            struct page *page,
                                            size_t offset, size_t size,
                                            enum dma_data_direction dir,
                                            unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, attrs);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
                                        dma_addr_t addr, size_t size,
                                        enum dma_data_direction dir,
                                        unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
                                          phys_addr_t phys_addr,
                                          size_t size,
                                          enum dma_data_direction dir,
                                          unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));

        /* Don't allow RAM to be mapped */
        BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

        addr = phys_addr;
        if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

        debug_dma_map_resource(dev, phys_addr, size, dir, addr);

        return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                                      size_t size, enum dma_data_direction dir,
                                      unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_resource)
                ops->unmap_resource(dev, addr, size, dir, attrs);
        debug_dma_unmap_resource(dev, addr, size, dir);
}
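
/*
 * dma_map_resource() maps a physical (MMIO) resource rather than RAM, which
 * is typically needed for device-to-device transfers. A sketch in which
 * 'phys' is assumed to be the physical address of another device's FIFO or
 * register window (all names hypothetical):
 *
 *        dma_addr_t dst;
 *
 *        dst = dma_map_resource(dev, phys, SZ_4K, DMA_TO_DEVICE, 0);
 *        if (dma_mapping_error(dev, dst))
 *                return -EIO;
 *        ...
 *        dma_unmap_resource(dev, dst, SZ_4K, DMA_TO_DEVICE, 0);
 */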

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                                           size_t size,
                                           enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                                              dma_addr_t addr, size_t size,
                                              enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                                                 dma_addr_t addr,
                                                 unsigned long offset,
                                                 size_t size,
                                                 enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                                                    dma_addr_t addr,
                                                    unsigned long offset,
                                                    size_t size,
                                                    enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
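
/*
 * For a long-lived streaming mapping the CPU must not touch the buffer while
 * the device owns it; ownership is handed back and forth with the sync calls
 * above. A sketch, with 'dev', 'dma', 'buf', 'len' and the process_rx_data()
 * helper assumed purely for illustration:
 *
 *        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *        process_rx_data(buf, len);
 *        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *        (the device may now DMA into the buffer again)
 */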

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
                                  unsigned long vm_flags,
                                  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
                             unsigned long vm_flags, pgprot_t prot,
                             const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
               dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!ops);
        if (ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
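
/*
 * A sketch of exporting a coherent buffer to user space from a driver's
 * mmap() file operation; the 'my_drv_priv' structure holding 'dev',
 * 'cpu_addr', 'dma_handle' and 'size' from an earlier dma_alloc_coherent()
 * call is hypothetical and only for illustration:
 *
 *        static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *        {
 *                struct my_drv_priv *priv = file->private_data;
 *
 *                return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *                                         priv->dma_handle, priv->size);
 *        }
 */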

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                      dma_addr_t dma_addr, size_t size,
                      unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!ops);
        if (ops->get_sgtable)
                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                        attrs);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t flag,
                                    unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        BUG_ON(!ops);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        if (!arch_dma_alloc_attrs(&dev, &flag))
                return NULL;
        if (!ops->alloc)
                return NULL;

        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
                                  void *cpu_addr, dma_addr_t dma_handle,
                                  unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!ops);
        WARN_ON(irqs_disabled());

        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;

        if (!ops->free || !cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag)
{
        return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *cpu_addr, dma_addr_t dma_handle)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_handle, gfp,
                               DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                                        void *cpu_addr, dma_addr_t dma_handle)
{
        dma_free_attrs(dev, size, cpu_addr, dma_handle,
                       DMA_ATTR_NON_CONSISTENT);
}
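
/*
 * A sketch of the coherent-allocation pattern; 'dev' and the descriptor-ring
 * size are assumed purely for illustration:
 *
 *        dma_addr_t ring_dma;
 *        void *ring;
 *
 *        ring = dma_alloc_coherent(dev, SZ_64K, &ring_dma, GFP_KERNEL);
 *        if (!ring)
 *                return -ENOMEM;
 *        (program 'ring_dma' into the device, use 'ring' from the CPU)
 *        dma_free_coherent(dev, SZ_64K, ring, ring_dma);
 */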

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        debug_dma_mapping_error(dev, dma_addr);

        if (get_dma_ops(dev)->mapping_error)
                return get_dma_ops(dev)->mapping_error(dev, dma_addr);
        return 0;
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops)
                return 0;
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (ops->set_dma_mask)
                return ops->set_dma_mask(dev, mask);

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;
        *dev->dma_mask = mask;
        return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
        if (dev && dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask))
                return -EIO;
        dev->coherent_dma_mask = mask;
        return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
        int rc = dma_set_mask(dev, mask);

        if (rc == 0)
                dma_set_coherent_mask(dev, mask);
        return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
        dev->dma_mask = &dev->coherent_dma_mask;
        return dma_set_mask_and_coherent(dev, mask);
}
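
/*
 * A sketch of the usual probe-time mask negotiation, where the driver
 * prefers 64-bit addressing and falls back to 32-bit; 'dev' is assumed for
 * illustration:
 *
 *        int ret;
 *
 *        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *        if (ret)
 *                ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *        if (ret)
 *                return ret;
 */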

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                                      u64 size, const struct iommu_ops *iommu,
                                      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
                return dev->dma_parms->max_segment_size;
        return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
                                                unsigned int size)
{
        if (dev->dma_parms) {
                dev->dma_parms->max_segment_size = size;
                return 0;
        }
        return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                return dev->dma_parms->segment_boundary_mask;
        return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
        if (dev->dma_parms) {
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }
        return -EIO;
}
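
/*
 * Both setters above require dev->dma_parms to have been allocated
 * (typically by the bus code) and return -EIO otherwise. A sketch of
 * constraining scatter-gather segments for a device that can only handle
 * 64 KiB segments which must not cross a 4 GiB boundary (values chosen
 * purely for illustration):
 *
 *        dma_set_max_seg_size(dev, SZ_64K);
 *        dma_set_seg_boundary(dev, DMA_BIT_MASK(32));
 */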

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
        return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_handle, gfp_t flag)
{
        void *ret = dma_alloc_coherent(dev, size, dma_handle,
                                       flag | __GFP_ZERO);
        return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP 0x01
#define DMA_MEMORY_IO 0x02
#define DMA_MEMORY_INCLUDES_CHILDREN 0x04
#define DMA_MEMORY_EXCLUSIVE 0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                                dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                            dma_addr_t device_addr, size_t size, int flags)
{
        return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size)
{
        return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
        return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                               dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
                                  dma_addr_t dma_handle);
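
/*
 * The dmam_* variants are device-managed (devres): the allocation is
 * released automatically when the device is unbound, so no explicit free is
 * needed on the error or remove paths. A sketch, with 'dev', 'desc' and the
 * size assumed for illustration:
 *
 *        desc = dmam_alloc_coherent(dev, SZ_4K, &desc_dma, GFP_KERNEL);
 *        if (!desc)
 *                return -ENOMEM;
 *        (no matching dmam_free_coherent() needed in .remove)
 */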

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
                                        phys_addr_t phys_addr,
                                        dma_addr_t device_addr, size_t size,
                                        int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
                                phys_addr_t phys_addr, dma_addr_t device_addr,
                                size_t size, int flags)
{
        return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_addr, gfp,
                               DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_addr,
                              DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
                              struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr,
                              size_t size)
{
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                              DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif

#endif