mmu_context_iommu.c

/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>

static DEFINE_MUTEX(mem_list_mutex);

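/*
 * One preregistered chunk of userspace memory: @entries pages starting at
 * userspace address @ua, pinned, with their host physical addresses cached
 * in the vmalloc'ed @hpas[] array. Chunks are linked into
 * mm->context.iommu_group_mem_list; the list is updated under
 * mem_list_mutex and walked under RCU by the lookup helpers.
 */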
struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};

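/*
 * Account @npages of pinned memory in mm->locked_vm: when increasing,
 * enforce RLIMIT_MEMLOCK unless the caller has CAP_IPC_LOCK; when
 * decreasing, warn and clamp if the counter would underflow.
 */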
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

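/* Return true if this mm has any memory preregistered for IOMMU use. */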
bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want the allocation to force an OOM if possible
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
	return new_page;
}

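/*
 * Migrate a single base page out of the CMA area before it gets pinned,
 * so that long-term pinning does not block future CMA allocations.
 * Compound (huge) pages are not handled and are left in place.
 */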
static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
				NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	return 0;
}

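/*
 * Pin @entries pages of userspace memory starting at @ua and register the
 * chunk on mm->context.iommu_group_mem_list. If an identical chunk is
 * already registered, its use count is bumped instead; a partial overlap
 * with an existing chunk fails with -EINVAL. Returns 0 and sets *@pmem on
 * success, or a negative errno.
 */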
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	struct page *page = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}
	}

	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}
		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible. NOTE: faulting in + migration
		 * can be expensive. Batching can be considered later
		 */
		if (is_migrate_cma_page(page)) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
								PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

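/*
 * A minimal sketch of a hypothetical caller (not taken from an in-tree
 * user; error handling trimmed). The intended order is: preregister once
 * with mm_iommu_get(), take a "mapped" reference with mm_iommu_mapped_inc()
 * before programming a hardware entry, translate with mm_iommu_ua_to_hpa(),
 * drop the reference with mm_iommu_mapped_dec() when the entry is torn
 * down, and finally unregister with mm_iommu_put():
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa = 0;
 *	long ret;
 *
 *	ret = mm_iommu_get(current->mm, ua, entries, &mem);
 *	if (ret)
 *		return ret;
 *
 *	ret = mm_iommu_mapped_inc(mem);
 *	if (!ret) {
 *		ret = mm_iommu_ua_to_hpa(mem, ua, &hpa);
 *		mm_iommu_mapped_dec(mem);
 *	}
 *
 *	mm_iommu_put(current->mm, mem);
 *	return ret;
 */
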
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		put_page(page);
		mem->hpas[i] = 0;
	}
}

static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

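/*
 * Drop one reference taken by mm_iommu_get(). When the last user goes away
 * and no hardware mappings remain, the chunk is unlisted, its pages are
 * unpinned after an RCU grace period, and the locked_vm accounting is
 * undone; -EBUSY is returned if mappings are still outstanding.
 */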
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

	mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);

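/*
 * Find the preregistered chunk which entirely contains the range
 * [@ua, @ua + @size), or return NULL if there is none.
 */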
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

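/*
 * Real-mode counterpart of mm_iommu_lookup(), walking the list with the
 * lockless iterator so it can be used by real-mode callers.
 */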
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);

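/*
 * Find a chunk that matches @ua and @entries exactly, as opposed to the
 * containment check done by mm_iommu_lookup().
 */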
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

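/*
 * Translate the userspace address @ua within a preregistered chunk into
 * the host physical address cached at registration time, preserving the
 * offset within the page.
 */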
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	*hpa = *va | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);

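/*
 * Real-mode variant of mm_iommu_ua_to_hpa(): hpas[] is vmalloc'ed, so the
 * entry is reached via vmalloc_to_phys() instead of the vmalloc mapping.
 */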
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va = &mem->hpas[entry];
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;

	*hpa = *pa | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);

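/*
 * Take and drop "mapped" references. The counter starts at 1 when a chunk
 * is registered; mm_iommu_put() drops it to 0 once the last user is gone,
 * after which mm_iommu_mapped_inc() refuses new hardware mappings.
 */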
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}