page-coherent.h

#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);
void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags,
					       attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle,
				       attrs);
}
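/*
 * Illustrative usage sketch (not part of the original header): one way a
 * caller might pair the two coherent-allocation wrappers above. The
 * function name is hypothetical; only xen_alloc_coherent_pages() and
 * xen_free_coherent_pages() come from this file.
 */
static inline int example_coherent_roundtrip(struct device *hwdev)
{
	dma_addr_t dma_handle;
	void *buf;

	buf = xen_alloc_coherent_pages(hwdev, PAGE_SIZE, &dma_handle,
				       GFP_KERNEL, NULL);
	if (!buf)
		return -ENOMEM;

	/* ... program the device with dma_handle, touch buf from the CPU ... */

	xen_free_coherent_pages(hwdev, PAGE_SIZE, buf, dma_handle, NULL);
	return 0;
}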
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple Xen
	 * pages, it cannot contain a mix of local and foreign Xen pages.
	 * So if the first xen_pfn == mfn, the page is local; otherwise it
	 * is a foreign page grant-mapped into dom0. If the page is local
	 * we can safely call the native dma_ops function, otherwise we
	 * call the Xen-specific function.
	 */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size,
						   dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir,
				   attrs);
}
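/*
 * Illustrative sketch (not part of the original header): the dispatch test
 * above, restated as a hypothetical predicate. In a 1:1 mapped dom0 the
 * machine frame behind dev_addr equals the page's first Xen frame, so the
 * comparison holds for local pages; a foreign, grant-mapped frame sits at
 * an unrelated machine address and fails it. The helper name is an
 * assumption for the example.
 */
static inline bool example_page_is_local(struct page *page,
					 dma_addr_t dev_addr)
{
	return XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
}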
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/*
	 * Dom0 is mapped 1:1. While a Linux page can span multiple Xen
	 * pages, it cannot contain a mix of local and foreign Xen pages.
	 * Because of the 1:1 mapping, calling pfn_valid on a foreign mfn
	 * will always return false. If the page is local we can safely
	 * call the native dma_ops function, otherwise we call the
	 * Xen-specific function.
	 */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle,
							     size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
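/*
 * Illustrative sketch (not part of the original header): the unmap and
 * sync paths below all key off the same test. pfn_valid() succeeds only
 * for RAM frames dom0 owns, so under the 1:1 mapping a foreign MFN always
 * fails it. The helper name is an assumption for the example.
 */
static inline bool example_handle_is_local(dma_addr_t handle)
{
	return pfn_valid(PFN_DOWN(handle));
}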
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev,
					handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);

	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev,
					handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
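/*
 * Illustrative sketch (not part of the original header): the order in
 * which the streaming helpers above would typically be exercised for a
 * device-to-CPU transfer. The function name, offset, and direction are
 * assumptions for the example; in mainline the real caller is the Xen
 * swiotlb layer.
 */
static void example_streaming_cycle(struct device *hwdev, struct page *page,
				    dma_addr_t dev_addr)
{
	xen_dma_map_page(hwdev, page, dev_addr, 0, PAGE_SIZE,
			 DMA_FROM_DEVICE, NULL);
	/* ... the device DMAs into the buffer ... */
	xen_dma_sync_single_for_cpu(hwdev, dev_addr, PAGE_SIZE,
				    DMA_FROM_DEVICE);
	/* ... the CPU consumes the data ... */
	xen_dma_unmap_page(hwdev, dev_addr, PAGE_SIZE, DMA_FROM_DEVICE, NULL);
}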