/*
 * mmu_context_iommu.c - IOMMU helpers in MMU context.
 *
 * Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>

static DEFINE_MUTEX(mem_list_mutex);

struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	unsigned int pageshift;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};
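
/*
 * Account the pages we are about to pin (or have just unpinned) against
 * the mm's RLIMIT_MEMLOCK; callers with CAP_IPC_LOCK may exceed the limit.
 */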
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}

bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);

/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want the allocation to force an OOM if possible
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
	return new_page;
}
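
/*
 * Pinned pages cannot be migrated, so moving a page out of the CMA area
 * before pinning it keeps that area available for future contiguous
 * allocations.
 */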
static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
				NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	/*
	 * Migration failure is not fatal: the page was put back on the LRU
	 * and the caller simply re-pins it wherever it currently is.
	 */
	return 0;
}
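
/*
 * mm_iommu_get() - pre-register and pin @entries pages of userspace memory
 * starting at @ua so they can later be mapped into an IOMMU table without
 * faulting.  Regions are refcounted via @used: registering exactly the same
 * region again only bumps the counter, while overlapping registrations are
 * rejected with -EINVAL.
 *
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *	unsigned long hpa;
 *
 *	if (!mm_iommu_get(current->mm, ua, size >> PAGE_SHIFT, &mem)) {
 *		mm_iommu_ua_to_hpa(mem, ua, PAGE_SHIFT, &hpa);
 *		// ... program the TCE table with hpa ...
 *		mm_iommu_put(current->mm, mem);
 *	}
 */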
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	unsigned int pageshift;
	unsigned long flags;
	struct page *page = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}
	}

	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	/*
	 * For a starting point for a maximum page size calculation
	 * we use @ua and @entries natural alignment to allow IOMMU pages
	 * smaller than huge pages but still bigger than PAGE_SIZE.
	 */
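	/*
	 * For example (illustrative numbers): with ua = 0x10000000 and
	 * entries << PAGE_SHIFT = 0x1000000 (16MB), __ffs() of the OR below
	 * yields 24, i.e. IOMMU pages of up to 1 << 24 = 16MB still fit
	 * entirely inside the registered region.
	 */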
	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}
		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible. NOTE: faulting in + migration
		 * can be expensive. Batching can be considered later.
		 */
		if (is_migrate_cma_page(page)) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
								PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		pageshift = PAGE_SHIFT;
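		/*
		 * If the page is backed by a huge page, it can be mapped
		 * with an IOMMU page size up to that huge page size, but
		 * only if the page table still shows the same huge mapping
		 * we just pinned.  The page table walk is done with
		 * interrupts off so the mapping cannot be split under us.
		 */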
		if (PageCompound(page)) {
			pte_t *pte;
			struct page *head = compound_head(page);
			unsigned int compshift = compound_order(head);
			unsigned int pteshift;

			local_irq_save(flags); /* saves and disables interrupts */
			pte = find_linux_pte(mm->pgd, ua, NULL, &pteshift);

			/*
			 * Double check it is still the same pinned page.
			 * compound_order() counts base pages while
			 * find_linux_pte() returns the full page shift,
			 * hence the "+ PAGE_SHIFT" in the comparison.
			 */
			if (pte && pte_page(*pte) == head &&
					pteshift == compshift + PAGE_SHIFT)
				pageshift = max_t(unsigned int, pteshift,
						PAGE_SHIFT);
			local_irq_restore(flags);
		}
		mem->pageshift = min(mem->pageshift, pageshift);
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);

static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		put_page(page);
		mem->hpas[i] = 0;
	}
}
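
/*
 * Releasing a region happens in two stages: the entry is unlinked from the
 * list right away, but unpinning and freeing are deferred via call_rcu()
 * because lookups traverse the list under RCU and, in real mode, completely
 * locklessly.
 */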
static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}

long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

	mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);
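
/*
 * Both lookup helpers return the pre-registered region fully containing
 * [ua, ua + size), or NULL.  mm_iommu_lookup() is the virtual mode variant;
 * mm_iommu_lookup_rm() is called from real mode and therefore iterates the
 * list locklessly.
 */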
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);

struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup_rm);

struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);

long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	*hpa = *va | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
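
/*
 * Real mode variant: hpas[] lives in vmalloc space, which cannot be
 * dereferenced through its virtual address while the MMU is off, so the
 * entry is reached through its physical address instead.
 */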
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va = &mem->hpas[entry];
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;

	*hpa = *pa | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa_rm);
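
/*
 * @mapped is initialized to 1 by mm_iommu_get() and dropped from 1 to 0 by
 * the final mm_iommu_put(), so inc_not_zero() fails exactly when the region
 * has been released.  The matching dec never goes below 1: that last
 * reference belongs to mm_iommu_put().
 */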
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);

void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}