mmu.c

#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);
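
/*
 * Translate a kernel virtual address to the machine frame number (MFN)
 * that backs it.
 */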
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}
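
/*
 * Translate a kernel virtual address to a full machine address, keeping
 * the offset within the page.  Addresses outside the linear mapping
 * (e.g. vmalloc) are resolved with a page-table walk.
 */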
xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
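
/*
 * Flush the TLBs of all vCPUs of the current domain with a single
 * MMUEXT_TLB_FLUSH_ALL multicall.
 */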
static void xen_flush_tlb_all(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_all(0);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_ALL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
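
/*
 * State shared with remap_area_pfn_pte_fn() while remapping foreign frames:
 * the frame number(s) to map, the protection bits, and a cursor into the
 * mmu_update batch being built (at most REMAP_BATCH_SIZE entries at a time).
 */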
#define REMAP_BATCH_SIZE 16

struct remap_data {
	xen_pfn_t *pfn;
	bool contiguous;
	bool no_translate;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};
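
/*
 * Called for each PTE in the range being remapped: queue an mmu_update
 * entry that points the PTE at the current frame, then advance to the
 * next frame number.
 */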
static int remap_area_pfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot));

	/*
	 * If we have a contiguous range, just update the pfn itself,
	 * else update pointer to be "next pfn".
	 */
	if (rmd->contiguous)
		(*rmd->pfn)++;
	else
		rmd->pfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
	rmd->mmu_update->ptr |= rmd->no_translate ?
		MMU_PT_UPDATE_NO_TRANSLATE :
		MMU_NORMAL_PT_UPDATE;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}
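
/*
 * Common worker for the remap helpers below: map @nr foreign frames into
 * @vma starting at @addr, in batches of REMAP_BATCH_SIZE.  With a non-NULL
 * @err_ptr, a per-frame status is recorded and mapping continues past
 * failed frames; otherwise the first error aborts the operation.  Returns
 * the number of frames mapped, or a negative errno.
 */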
static int do_remap_pfn(struct vm_area_struct *vma,
			unsigned long addr,
			xen_pfn_t *pfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned int domid,
			bool no_translate,
			struct page **pages)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	rmd.pfn = pfn;
	rmd.prot = prot;
	/*
	 * We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous mapping.
	 */
	rmd.contiguous = !err_ptr;
	rmd.no_translate = no_translate;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;

		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_pfn_pte_fn, &rmd);
		if (err)
			goto out;

		/*
		 * We record the error for each page that gives an error, but
		 * continue mapping until the whole set is done.
		 */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @gfn, so
			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:

	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}
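
/*
 * Map a contiguous range of foreign frames, starting at @gfn, into @vma.
 * Not supported for auto-translated (HVM/PVH) domains.
 */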
int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,
			    pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
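
/*
 * Map an array of foreign frames into @vma, recording a per-frame status
 * in @err_ptr.  Auto-translated domains are handled by the generic
 * xen_xlate_remap_gfn_array() helper.
 */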
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);

	/*
	 * We BUG_ON because it's a programmer error to pass a NULL err_ptr,
	 * and it would otherwise be very hard to diagnose later why the
	 * wrong memory was mapped in.
	 */
	BUG_ON(err_ptr == NULL);

	return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,
			    false, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
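
/*
 * Like xen_remap_domain_gfn_array(), but the entries in @mfn are machine
 * frame numbers that are installed without the hypervisor translating
 * them (MMU_PT_UPDATE_NO_TRANSLATE).  PV-only.
 */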
int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *mfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned int domid, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return do_remap_pfn(vma, addr, mfn, nr, err_ptr, prot, domid,
			    true, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
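
/*
 * Undo a mapping established by the remap calls above.  Auto-translated
 * domains are handled by the generic xen_xlate_unmap_gfn_range() helper;
 * for PV domains the PTEs go away with the VMA, so there is nothing to do
 * here and a non-NULL @pages array is rejected.
 */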
/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_unmap_gfn_range(vma, nr, pages);

	if (!pages)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);