mmu_context_iommu.c

/*
 * IOMMU helpers in MMU context.
 *
 * Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>

static DEFINE_MUTEX(mem_list_mutex);

/*
 * One chunk of userspace memory preregistered for IOMMU use: its pages are
 * pinned and their host physical addresses are cached in hpas[].
 */
struct mm_iommu_table_group_mem_t {
        struct list_head next;
        struct rcu_head rcu;
        unsigned long used;
        atomic64_t mapped;
        u64 ua;                 /* userspace address */
        u64 entries;            /* number of entries in hpas[] */
        u64 *hpas;              /* vmalloc'ed */
};

/* Account pinned pages against the RLIMIT_MEMLOCK limit of @mm */
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
                unsigned long npages, bool incr)
{
        long ret = 0, locked, lock_limit;

        if (!npages)
                return 0;

        down_write(&mm->mmap_sem);

        if (incr) {
                locked = mm->locked_vm + npages;
                lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        ret = -ENOMEM;
                else
                        mm->locked_vm += npages;
        } else {
                if (WARN_ON_ONCE(npages > mm->locked_vm))
                        npages = mm->locked_vm;
                mm->locked_vm -= npages;
        }

        pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
                        current->pid,
                        incr ? '+' : '-',
                        npages << PAGE_SHIFT,
                        mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK));
        up_write(&mm->mmap_sem);

        return ret;
}

bool mm_iommu_preregistered(void)
{
        if (!current || !current->mm)
                return false;

        return !list_empty(&current->mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Preregister a chunk of userspace memory: pin @entries pages starting at
 * @ua and cache their host physical addresses in hpas[].  Registering the
 * same chunk again only bumps its use count; a partial overlap with an
 * existing registration is rejected with -EINVAL.
 */
long mm_iommu_get(unsigned long ua, unsigned long entries,
                struct mm_iommu_table_group_mem_t **pmem)
{
        struct mm_iommu_table_group_mem_t *mem;
        long i, j, ret = 0, locked_entries = 0;
        struct page *page = NULL;

        if (!current || !current->mm)
                return -ESRCH; /* process exited */

        mutex_lock(&mem_list_mutex);

        list_for_each_entry_rcu(mem, &current->mm->context.iommu_group_mem_list,
                        next) {
                /* Same chunk registered already? Just take another reference. */
                if ((mem->ua == ua) && (mem->entries == entries)) {
                        ++mem->used;
                        *pmem = mem;
                        goto unlock_exit;
                }

                /* Overlap? */
                if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
                                (ua < (mem->ua +
                                       (mem->entries << PAGE_SHIFT)))) {
                        ret = -EINVAL;
                        goto unlock_exit;
                }
        }

        ret = mm_iommu_adjust_locked_vm(current->mm, entries, true);
        if (ret)
                goto unlock_exit;

        locked_entries = entries;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem) {
                ret = -ENOMEM;
                goto unlock_exit;
        }

        mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
        if (!mem->hpas) {
                kfree(mem);
                ret = -ENOMEM;
                goto unlock_exit;
        }

        /* Pin every page and remember its host physical address */
        for (i = 0; i < entries; ++i) {
                if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
                                        1/* pages */, 1/* iswrite */, &page)) {
                        /* Undo the pinning done so far */
                        for (j = 0; j < i; ++j)
                                put_page(pfn_to_page(
                                                mem->hpas[j] >> PAGE_SHIFT));
                        vfree(mem->hpas);
                        kfree(mem);
                        ret = -EFAULT;
                        goto unlock_exit;
                }

                mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
        }

        atomic64_set(&mem->mapped, 1);
        mem->used = 1;
        mem->ua = ua;
        mem->entries = entries;
        *pmem = mem;

        list_add_rcu(&mem->next, &current->mm->context.iommu_group_mem_list);

unlock_exit:
        if (locked_entries && ret)
                mm_iommu_adjust_locked_vm(current->mm, locked_entries, false);

        mutex_unlock(&mem_list_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

/* Unpin all pages of a chunk and clear the cached physical addresses */
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
        long i;
        struct page *page = NULL;

        for (i = 0; i < mem->entries; ++i) {
                if (!mem->hpas[i])
                        continue;

                page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
                if (!page)
                        continue;

                put_page(page);
                mem->hpas[i] = 0;
        }
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
        mm_iommu_unpin(mem);
        vfree(mem->hpas);
        kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
        struct mm_iommu_table_group_mem_t *mem = container_of(head,
                        struct mm_iommu_table_group_mem_t, rcu);

        mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
        list_del_rcu(&mem->next);
        mm_iommu_adjust_locked_vm(current->mm, mem->entries, false);
        call_rcu(&mem->rcu, mm_iommu_free);
}

/*
 * Drop a reference taken by mm_iommu_get().  The last reference releases
 * the chunk unless it still has active mappings, in which case -EBUSY is
 * returned and the reference is kept.
 */
long mm_iommu_put(struct mm_iommu_table_group_mem_t *mem)
{
        long ret = 0;

        if (!current || !current->mm)
                return -ESRCH; /* process exited */

        mutex_lock(&mem_list_mutex);

        if (mem->used == 0) {
                ret = -ENOENT;
                goto unlock_exit;
        }

        --mem->used;
        /* There are still users, exit */
        if (mem->used)
                goto unlock_exit;

        /* Are there still mappings? */
        if (atomic_cmpxchg(&mem->mapped, 1, 0) != 1) {
                ++mem->used;
                ret = -EBUSY;
                goto unlock_exit;
        }

        /* @mapped became 0 so now mappings are disabled, release the region */
        mm_iommu_release(mem);

unlock_exit:
        mutex_unlock(&mem_list_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

/* Find the registered chunk which fully covers [ua, ua + size) */
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(unsigned long ua,
                unsigned long size)
{
        struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

        list_for_each_entry_rcu(mem,
                        &current->mm->context.iommu_group_mem_list,
                        next) {
                if ((mem->ua <= ua) &&
                                (ua + size <= mem->ua +
                                 (mem->entries << PAGE_SHIFT))) {
                        ret = mem;
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

/* Find a chunk registered with exactly this @ua and @entries */
struct mm_iommu_table_group_mem_t *mm_iommu_find(unsigned long ua,
                unsigned long entries)
{
        struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

        list_for_each_entry_rcu(mem,
                        &current->mm->context.iommu_group_mem_list,
                        next) {
                if ((mem->ua == ua) && (mem->entries == entries)) {
                        ret = mem;
                        break;
                }
        }

        return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

/* Translate a userspace address within @mem into a host physical address */
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
                unsigned long ua, unsigned long *hpa)
{
        const long entry = (ua - mem->ua) >> PAGE_SHIFT;
        u64 *va = &mem->hpas[entry];

        if (entry >= mem->entries)
                return -EFAULT;

        *hpa = *va | (ua & ~PAGE_MASK);

        return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
        if (atomic64_inc_not_zero(&mem->mapped))
                return 0;

        /* Last mm_iommu_put() has been called, no more mappings allowed */
        return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

/* Drop a mapping reference; the final 1 -> 0 transition is left to mm_iommu_put() */
void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
        atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

void mm_iommu_init(mm_context_t *ctx)
{
        INIT_LIST_HEAD_RCU(&ctx->iommu_group_mem_list);
}

void mm_iommu_cleanup(mm_context_t *ctx)
{
        struct mm_iommu_table_group_mem_t *mem, *tmp;

        list_for_each_entry_safe(mem, tmp, &ctx->iommu_group_mem_list, next) {
                list_del_rcu(&mem->next);
                mm_iommu_do_free(mem);
        }
}
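
/*
 * For reference, a minimal sketch of how a consumer of this API (for
 * example, a VFIO-style driver) would typically drive it.  Only the
 * mm_iommu_*() calls below come from this file; the wrapper function, its
 * name and its simplified error handling are hypothetical.
 */
static long example_preregister_translate_put(unsigned long ua,
                unsigned long entries)
{
        struct mm_iommu_table_group_mem_t *mem, *found;
        unsigned long hpa;
        long ret;

        /* Pin the pages of [ua, ua + (entries << PAGE_SHIFT)) */
        ret = mm_iommu_get(ua, entries, &mem);
        if (ret)
                return ret;

        /* Later, while building a DMA mapping: find the covering chunk */
        found = mm_iommu_lookup(ua, entries << PAGE_SHIFT);
        if (!found) {
                ret = -ENXIO;
                goto put_exit;
        }

        /* Keep the registration alive while the mapping exists */
        ret = mm_iommu_mapped_inc(found);
        if (ret)
                goto put_exit;

        /* Translate a userspace address to a host physical address */
        ret = mm_iommu_ua_to_hpa(found, ua, &hpa);

        /* Drop the mapping reference once the DMA mapping is gone */
        mm_iommu_mapped_dec(found);

put_exit:
        /* Drop the registration reference taken by mm_iommu_get() */
        mm_iommu_put(mem);

        return ret;
}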