p2m.c

#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/rwlock.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
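
/*
 * Xen p2m (pfn <-> mfn) tracking for ARM. ARM domains run
 * auto-translated, so only foreign pages (e.g. grant mappings) need an
 * entry here; everything else maps 1:1. Mappings are kept as ranges in
 * two rbtrees, phys_to_mach keyed by pfn and mach_to_phys keyed by
 * mfn, both protected by p2m_lock.
 */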
struct xen_p2m_entry {
        unsigned long pfn;
        unsigned long mfn;
        unsigned long nr_pages;
        struct rb_node rbnode_mach;
        struct rb_node rbnode_phys;
};

static rwlock_t p2m_lock;
struct rb_root phys_to_mach = RB_ROOT;
EXPORT_SYMBOL_GPL(phys_to_mach);
static struct rb_root mach_to_phys = RB_ROOT;

static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new)
{
        struct rb_node **link = &phys_to_mach.rb_node;
        struct rb_node *parent = NULL;
        struct xen_p2m_entry *entry;
        int rc = 0;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys);

                if (new->mfn == entry->mfn)
                        goto err_out;
                if (new->pfn == entry->pfn)
                        goto err_out;

                if (new->pfn < entry->pfn)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&new->rbnode_phys, parent, link);
        rb_insert_color(&new->rbnode_phys, &phys_to_mach);
        goto out;

err_out:
        rc = -EINVAL;
        /* %lx, not %pa: the fields are unsigned long, and %pa expects
         * a phys_addr_t pointer (64-bit under ARM LPAE). */
        pr_warn("%s: cannot add pfn=%lx -> mfn=%lx: pfn=%lx -> mfn=%lx already exists\n",
                        __func__, new->pfn, new->mfn, entry->pfn, entry->mfn);
out:
        return rc;
}
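
/*
 * Look up the mfn backing @pfn. Each tree entry describes a contiguous
 * range of nr_pages frames, so a hit returns the entry's base mfn plus
 * the offset of @pfn inside the range; INVALID_P2M_ENTRY means no
 * foreign mapping is recorded for @pfn.
 */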
unsigned long __pfn_to_mfn(unsigned long pfn)
{
        struct rb_node *n = phys_to_mach.rb_node;
        struct xen_p2m_entry *entry;
        unsigned long irqflags;

        read_lock_irqsave(&p2m_lock, irqflags);
        while (n) {
                entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
                if (entry->pfn <= pfn &&
                                entry->pfn + entry->nr_pages > pfn) {
                        read_unlock_irqrestore(&p2m_lock, irqflags);
                        return entry->mfn + (pfn - entry->pfn);
                }
                if (pfn < entry->pfn)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        read_unlock_irqrestore(&p2m_lock, irqflags);

        return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__pfn_to_mfn);

static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new)
{
        struct rb_node **link = &mach_to_phys.rb_node;
        struct rb_node *parent = NULL;
        struct xen_p2m_entry *entry;
        int rc = 0;

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach);

                if (new->mfn == entry->mfn)
                        goto err_out;
                if (new->pfn == entry->pfn)
                        goto err_out;

                if (new->mfn < entry->mfn)
                        link = &(*link)->rb_left;
                else
                        link = &(*link)->rb_right;
        }
        rb_link_node(&new->rbnode_mach, parent, link);
        rb_insert_color(&new->rbnode_mach, &mach_to_phys);
        goto out;

err_out:
        rc = -EINVAL;
        pr_warn("%s: cannot add pfn=%lx -> mfn=%lx: pfn=%lx -> mfn=%lx already exists\n",
                        __func__, new->pfn, new->mfn, entry->pfn, entry->mfn);
out:
        return rc;
}
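
/*
 * Reverse lookup: find the local pfn backing @mfn by walking the
 * mach_to_phys tree. Range handling mirrors __pfn_to_mfn().
 */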
unsigned long __mfn_to_pfn(unsigned long mfn)
{
        struct rb_node *n = mach_to_phys.rb_node;
        struct xen_p2m_entry *entry;
        unsigned long irqflags;

        read_lock_irqsave(&p2m_lock, irqflags);
        while (n) {
                entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach);
                if (entry->mfn <= mfn &&
                                entry->mfn + entry->nr_pages > mfn) {
                        read_unlock_irqrestore(&p2m_lock, irqflags);
                        return entry->pfn + (mfn - entry->mfn);
                }
                if (mfn < entry->mfn)
                        n = n->rb_left;
                else
                        n = n->rb_right;
        }
        read_unlock_irqrestore(&p2m_lock, irqflags);

        return INVALID_P2M_ENTRY;
}
EXPORT_SYMBOL_GPL(__mfn_to_pfn);
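
/*
 * Record the mappings created by a batch of grant-table map ops. Ops
 * with a non-zero status failed in the hypervisor and are skipped; for
 * the rest, host_addr >> PAGE_SHIFT gives the local pfn and
 * dev_bus_addr >> PAGE_SHIFT the foreign mfn it now maps to.
 */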
int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
                            struct gnttab_map_grant_ref *kmap_ops,
                            struct page **pages, unsigned int count)
{
        int i;

        for (i = 0; i < count; i++) {
                if (map_ops[i].status)
                        continue;
                set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
                                    map_ops[i].dev_bus_addr >> PAGE_SHIFT);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(set_foreign_p2m_mapping);
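
/*
 * Undo set_foreign_p2m_mapping(): reset each unmapped pfn to
 * INVALID_P2M_ENTRY, which drops its range entry from both trees.
 */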
int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
                              struct gnttab_map_grant_ref *kmap_ops,
                              struct page **pages, unsigned int count)
{
        int i;

        for (i = 0; i < count; i++) {
                set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
                                    INVALID_P2M_ENTRY);
        }

        return 0;
}
EXPORT_SYMBOL_GPL(clear_foreign_p2m_mapping);
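
/*
 * Install or remove a pfn -> mfn mapping covering nr_pages frames.
 * Passing INVALID_P2M_ENTRY as @mfn erases whichever entry contains
 * @pfn from both trees; otherwise a new range entry is allocated (with
 * GFP_NOWAIT, so the call is safe in atomic context) and inserted into
 * both trees. Returns false on allocation or insertion failure.
 */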
bool __set_phys_to_machine_multi(unsigned long pfn,
                unsigned long mfn, unsigned long nr_pages)
{
        int rc;
        unsigned long irqflags;
        struct xen_p2m_entry *p2m_entry;
        struct rb_node *n = phys_to_mach.rb_node;

        if (mfn == INVALID_P2M_ENTRY) {
                write_lock_irqsave(&p2m_lock, irqflags);
                while (n) {
                        p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys);
                        if (p2m_entry->pfn <= pfn &&
                                        p2m_entry->pfn + p2m_entry->nr_pages > pfn) {
                                rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys);
                                rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
                                write_unlock_irqrestore(&p2m_lock, irqflags);
                                kfree(p2m_entry);
                                return true;
                        }
                        if (pfn < p2m_entry->pfn)
                                n = n->rb_left;
                        else
                                n = n->rb_right;
                }
                write_unlock_irqrestore(&p2m_lock, irqflags);
                return true;
        }
        p2m_entry = kzalloc(sizeof(struct xen_p2m_entry), GFP_NOWAIT);
        if (!p2m_entry) {
                pr_warn("cannot allocate xen_p2m_entry\n");
                return false;
        }
        p2m_entry->pfn = pfn;
        p2m_entry->nr_pages = nr_pages;
        p2m_entry->mfn = mfn;

        write_lock_irqsave(&p2m_lock, irqflags);
        /* Insert sequentially rather than via chained assignments: the
         * original "(rc = f() < 0)" bound the comparison before the
         * assignment, and the failure path also leaked p2m_entry. */
        rc = xen_add_phys_to_mach_entry(p2m_entry);
        if (rc < 0) {
                write_unlock_irqrestore(&p2m_lock, irqflags);
                kfree(p2m_entry);
                return false;
        }
        rc = xen_add_mach_to_phys_entry(p2m_entry);
        if (rc < 0) {
                /* undo the phys_to_mach insert before freeing */
                rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach);
                write_unlock_irqrestore(&p2m_lock, irqflags);
                kfree(p2m_entry);
                return false;
        }
        write_unlock_irqrestore(&p2m_lock, irqflags);
        return true;
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine_multi);

bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
        return __set_phys_to_machine_multi(pfn, mfn, 1);
}
EXPORT_SYMBOL_GPL(__set_phys_to_machine);

static int p2m_init(void)
{
        rwlock_init(&p2m_lock);
        return 0;
}
arch_initcall(p2m_init);