#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * If the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup.
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* Otherwise we have to do a (slower) full page-table walk. */
	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;

	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

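/*
 * Illustrative sketch, not part of the original file: how a caller might
 * split the result of arbitrary_virt_to_machine() back into a machine frame
 * number and a page offset, e.g. to fill in an interface structure that
 * wants the two separately.  The function name is invented for the example
 * and nothing in this file calls it (hence __maybe_unused).
 */
static void __maybe_unused example_split_machine_addr(void *vaddr,
						      unsigned long *mfn,
						      unsigned long *offset)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	*mfn = PFN_DOWN(maddr.maddr);	/* same as arbitrary_virt_to_mfn() */
	*offset = maddr.maddr & ~PAGE_MASK;
}
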
static void xen_flush_tlb_all(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_all(0);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_ALL;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

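/*
 * Illustrative sketch, not part of the original file: the multicall pattern
 * used in xen_flush_tlb_all() can batch any mmuext_op.  Here a single-page
 * local TLB flush (MMUEXT_INVLPG_LOCAL) is queued instead of a full flush.
 * The function name is invented for the example.
 */
static void __maybe_unused example_flush_one_page(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	/* Issues immediately unless we are already inside a lazy MMU batch. */
	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
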
#define REMAP_BATCH_SIZE 16

struct remap_data {
	xen_pfn_t *mfn;
	bool contiguous;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

	/*
	 * If we have a contiguous range, just update the mfn itself,
	 * otherwise advance the pointer to the next mfn.
	 */
	if (rmd->contiguous)
		(*rmd->mfn)++;
	else
		rmd->mfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

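/*
 * Illustrative sketch, not part of the original file: each entry built by
 * remap_area_mfn_pte_fn() is a generic "write this value into the PTE at
 * this machine address" request.  This is how a single entry could be
 * submitted directly, outside the batched path in do_remap_gfn() below.
 * The function name is invented for the example.
 */
static int __maybe_unused example_write_one_pte(pte_t *ptep, pte_t newpte)
{
	struct mmu_update u;
	int done = 0;

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(newpte);

	return HYPERVISOR_mmu_update(&u, 1, &done, DOMID_SELF);
}
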
static int do_remap_gfn(struct vm_area_struct *vma,
			unsigned long addr,
			xen_pfn_t *gfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned domid,
			struct page **pages)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	rmd.mfn = gfn;
	rmd.prot = prot;
	/*
	 * We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous mapping.
	 */
	rmd.contiguous = !err_ptr;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;

		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		/*
		 * We record the error for each page that gives an error, but
		 * continue mapping until the whole set is done.
		 */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @gfn, so
			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				/* i == index + done: the frame that failed. */
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:
	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}

int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);

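/*
 * Illustrative sketch, not part of the original file: how a PV driver's
 * mmap() handler might map @nr contiguous foreign frames of domain @domid,
 * starting at @first_gfn, into a userspace VMA.  The helper name and the
 * extra VM_DONT* flags are invented for the example; do_remap_gfn() itself
 * only insists on VM_IO | VM_PFNMAP.
 */
static int __maybe_unused example_mmap_foreign_range(struct vm_area_struct *vma,
						     xen_pfn_t first_gfn,
						     int nr, unsigned domid)
{
	int mapped;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;

	mapped = xen_remap_domain_gfn_range(vma, vma->vm_start, first_gfn, nr,
					    vma->vm_page_prot, domid, NULL);

	/* Negative means errno (e.g. -EOPNOTSUPP on auto-translated guests). */
	return mapped < 0 ? mapped : 0;
}
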
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);

	/*
	 * We BUG_ON() because passing a NULL err_ptr is a programmer error,
	 * and the eventual consequence ("wrong memory was mapped in") is
	 * very hard to trace back to its actual cause.
	 */
	BUG_ON(err_ptr == NULL);

	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);

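/*
 * Illustrative sketch, not part of the original file: mapping a
 * discontiguous set of foreign frames and inspecting the per-frame results
 * reported through @err_ptr.  All names are invented for the example; a
 * real caller (such as privcmd) may even pass the same buffer as both @gfn
 * and @err_ptr, which the code above explicitly supports.
 */
static int __maybe_unused example_map_gfn_array(struct vm_area_struct *vma,
						xen_pfn_t *gfns, int *errs,
						int nr, unsigned domid)
{
	int i, failed = 0;
	int mapped = xen_remap_domain_gfn_array(vma, vma->vm_start, gfns, nr,
						errs, vma->vm_page_prot,
						domid, NULL);

	if (mapped < 0)
		return mapped;	/* hypercall-level failure; errs[] may be partial */

	for (i = 0; i < nr; i++)
		if (errs[i] < 0)
			failed++;

	return failed ? -EFAULT : 0;
}
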
/* Returns: 0 on success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_unmap_gfn_range(vma, nr, pages);

	if (!pages)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
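
/*
 * Illustrative sketch, not part of the original file: a VMA close handler,
 * in the style of privcmd, letting the Xen layer undo a previous remap.
 * For PV guests (pages == NULL) this is a no-op and the PTEs are torn down
 * by the generic munmap path; auto-translated guests must hand back the
 * ballooned pages they passed in at map time.  The use of vm_private_data
 * and the helper name are assumptions made for the example.
 */
static void __maybe_unused example_vma_close(struct vm_area_struct *vma)
{
	int nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct page **pages = vma->vm_private_data;

	WARN_ON_ONCE(xen_unmap_domain_gfn_range(vma, nr, pages));
}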