page-coherent.h

#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

void __xen_dma_map_page(struct device *hwdev, struct page *page,
                dma_addr_t dev_addr, unsigned long offset, size_t size,
                enum dma_data_direction dir, struct dma_attrs *attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir);
void __xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir);

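/*
 * The __xen_dma_* helpers above are only declared here; they are
 * implemented in the Xen/ARM support code (under arch/arm/xen/ in the
 * kernel tree) and take care of the cache maintenance that foreign,
 * grant-mapped pages need and that the native dma_ops cannot provide.
 */
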
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
                dma_addr_t *dma_handle, gfp_t flags,
                struct dma_attrs *attrs)
{
        return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle,
                struct dma_attrs *attrs)
{
        __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

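/*
 * Note that both allocation helpers simply forward to the native dma_ops:
 * coherent buffers come from dom0's own (1:1 mapped) memory, so there is
 * no foreign-page case to handle here; any Xen-specific work is expected
 * to be done by the callers (e.g. the swiotlb-xen layer).
 */
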
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
                dma_addr_t dev_addr, unsigned long offset, size_t size,
                enum dma_data_direction dir, struct dma_attrs *attrs)
{
        bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
        /* Dom0 is mapped 1:1, so if pfn == mfn the page is local; otherwise
         * it is a foreign page grant-mapped in dom0. If the page is local we
         * can safely call the native dma_ops function, otherwise we call
         * the xen specific function. */
        if (local)
                __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
        else
                __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        unsigned long pfn = PFN_DOWN(handle);
        /* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will
         * always return false. If the page is local we can safely call the
         * native dma_ops function, otherwise we call the xen specific
         * function. */
        if (pfn_valid(pfn)) {
                if (__generic_dma_ops(hwdev)->unmap_page)
                        __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
        } else
                __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}

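/*
 * The unmap_page and sync_single_* hooks are optional members of
 * struct dma_map_ops and may be NULL, which is why the local-page paths
 * here and below test the pointer before calling through it.
 */
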
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        unsigned long pfn = PFN_DOWN(handle);
        if (pfn_valid(pfn)) {
                if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
                        __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
        } else
                __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

static inline void xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        unsigned long pfn = PFN_DOWN(handle);
        if (pfn_valid(pfn)) {
                if (__generic_dma_ops(hwdev)->sync_single_for_device)
                        __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
        } else
                __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

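/*
 * Illustrative usage sketch (not part of this header; every name other
 * than the xen_dma_* wrappers declared above is hypothetical): a caller
 * such as the Xen swiotlb code is expected to pick the device address
 * first and then let the wrappers dispatch on whether the backing page is
 * local or foreign:
 *
 *      dma_addr_t dev_addr = bus_base + offset;        (hypothetical setup)
 *
 *      xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
 *      xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
 *      xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
 */
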
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */