mm32.c

#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <xen/features.h>
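
/*
 * Per-CPU scratch mapping used to reach pages that have no local
 * struct page (e.g. foreign, grant-mapped memory): each CPU keeps one
 * spare kernel virtual address and a pointer to the PTE backing it, so
 * such a page can be mapped there temporarily.
 */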
static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
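
/*
 * Allocate one scratch page for @cpu and record the kernel virtual
 * address it lives at together with the PTE that backs that address.
 * Safe to call more than once per CPU: it returns early if the slot
 * is already populated.
 */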
static int alloc_xen_mm32_scratch_page(int cpu)
{
        struct page *page;
        unsigned long virt;
        pmd_t *pmdp;
        pte_t *ptep;

        if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
                return 0;

        page = alloc_page(GFP_KERNEL);
        if (page == NULL) {
                pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
                return -ENOMEM;
        }

        virt = (unsigned long)__va(page_to_phys(page));
        pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
        ptep = pte_offset_kernel(pmdp, virt);

        per_cpu(xen_mm32_scratch_virt, cpu) = virt;
        per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;

        return 0;
}
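
/*
 * CPU hotplug callback: make sure a scratch page exists before a new
 * CPU is brought online. A failed allocation vetoes the bring-up.
 */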
static int xen_mm32_cpu_notify(struct notifier_block *self,
                               unsigned long action, void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                if (alloc_xen_mm32_scratch_page(cpu))
                        return NOTIFY_BAD;
                break;
        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block xen_mm32_cpu_notifier = {
        .notifier_call = xen_mm32_cpu_notify,
};
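
/*
 * Map the page behind @handle at this CPU's scratch virtual address by
 * rewriting the saved PTE, then flush the local TLB entry for it. The
 * matching xen_mm32_unmap() only drops the per-CPU reference taken by
 * get_cpu_var(); the stale mapping is simply overwritten on the next
 * remap.
 */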
static void *xen_mm32_remap_page(dma_addr_t handle)
{
        unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
        pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);

        *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
        local_flush_tlb_kernel_page(virt);

        return (void *)virt;
}

static void xen_mm32_unmap(void *vaddr)
{
        put_cpu_var(xen_mm32_scratch_virt);
}

/* functions called by SWIOTLB */
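
/*
 * Apply the cache maintenance callback @op across @size bytes starting
 * at @handle + @offset. Pages backed by a local struct page are reached
 * through the linear map or kmap; pages that fail pfn_valid() are only
 * handled when Xen advertises XENFEAT_grant_map_identity, via the
 * per-CPU scratch mapping above.
 */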
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
        size_t size, enum dma_data_direction dir,
        void (*op)(const void *, size_t, int))
{
        unsigned long pfn;
        size_t left = size;

        pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
        offset %= PAGE_SIZE;

        do {
                size_t len = left;
                void *vaddr;

                if (!pfn_valid(pfn)) {
                        /* Cannot map the page, we don't know its physical address.
                         * Return and hope for the best */
                        if (!xen_feature(XENFEAT_grant_map_identity))
                                return;
                        vaddr = xen_mm32_remap_page(handle) + offset;
                        op(vaddr, len, dir);
                        xen_mm32_unmap(vaddr - offset);
                } else {
                        struct page *page = pfn_to_page(pfn);

                        if (PageHighMem(page)) {
                                if (len + offset > PAGE_SIZE)
                                        len = PAGE_SIZE - offset;

                                if (cache_is_vipt_nonaliasing()) {
                                        vaddr = kmap_atomic(page);
                                        op(vaddr + offset, len, dir);
                                        kunmap_atomic(vaddr);
                                } else {
                                        vaddr = kmap_high_get(page);
                                        if (vaddr) {
                                                op(vaddr + offset, len, dir);
                                                kunmap_high(page);
                                        }
                                }
                        } else {
                                vaddr = page_address(page) + offset;
                                op(vaddr, len, dir);
                        }
                }

                offset = 0;
                pfn++;
                left -= len;
        } while (left);
}
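
/*
 * Inner (CPU) cache maintenance goes through dma_cache_maint(); the
 * outer cache operates on physical addresses, so the outer_*_range()
 * calls work on the handle range directly.
 */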
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        /* Cannot use __dma_page_dev_to_cpu because we don't have a
         * struct page for handle */

        if (dir != DMA_TO_DEVICE)
                outer_inv_range(handle, handle + size);

        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);

        if (dir == DMA_FROM_DEVICE) {
                outer_inv_range(handle, handle + size);
        } else {
                outer_clean_range(handle, handle + size);
        }
}
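
/*
 * Entry points used by the Xen swiotlb code for pages that may belong
 * to foreign domains. Each one bails out early if the native DMA ops
 * do not implement the corresponding callback.
 */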
void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir,
                struct dma_attrs *attrs)
{
        if (!__generic_dma_ops(hwdev)->unmap_page)
                return;
        if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                return;

        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void xen_dma_sync_single_for_cpu(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
                return;
        __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void xen_dma_sync_single_for_device(struct device *hwdev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        if (!__generic_dma_ops(hwdev)->sync_single_for_device)
                return;
        __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}
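
/*
 * The scratch pages are only needed when running as the initial
 * domain, which is the case that deals with foreign mappings: allocate
 * one per online CPU now, and rely on the hotplug notifier for CPUs
 * that come up later.
 */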
int __init xen_mm32_init(void)
{
        int cpu;

        if (!xen_initial_domain())
                return 0;

        register_cpu_notifier(&xen_mm32_cpu_notifier);
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (alloc_xen_mm32_scratch_page(cpu)) {
                        put_online_cpus();
                        unregister_cpu_notifier(&xen_mm32_cpu_notifier);
                        return -ENOMEM;
                }
        }
        put_online_cpus();

        return 0;
}
arch_initcall(xen_mm32_init);