mmu_context_iommu.c

/*
 *  IOMMU helpers in MMU context.
 *
 *  Copyright (C) 2015 IBM Corp. <aik@ozlabs.ru>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/sizes.h>
#include <asm/mmu_context.h>
#include <asm/pte-walk.h>

static DEFINE_MUTEX(mem_list_mutex);

#define MM_IOMMU_TABLE_GROUP_PAGE_DIRTY	0x1
#define MM_IOMMU_TABLE_GROUP_PAGE_MASK	~(SZ_4K - 1)
struct mm_iommu_table_group_mem_t {
	struct list_head next;
	struct rcu_head rcu;
	unsigned long used;
	atomic64_t mapped;
	unsigned int pageshift;
	u64 ua;			/* userspace address */
	u64 entries;		/* number of entries in hpas[] */
	u64 *hpas;		/* vmalloc'ed */
};
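/*
 * Account @npages against the mm's RLIMIT_MEMLOCK: add them when @incr is
 * true (failing with -ENOMEM if the limit would be exceeded without
 * CAP_IPC_LOCK), subtract them otherwise.
 */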
static long mm_iommu_adjust_locked_vm(struct mm_struct *mm,
		unsigned long npages, bool incr)
{
	long ret = 0, locked, lock_limit;

	if (!npages)
		return 0;

	down_write(&mm->mmap_sem);

	if (incr) {
		locked = mm->locked_vm + npages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			mm->locked_vm += npages;
	} else {
		if (WARN_ON_ONCE(npages > mm->locked_vm))
			npages = mm->locked_vm;
		mm->locked_vm -= npages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK HASH64 %c%ld %ld/%ld\n",
			current ? current->pid : 0,
			incr ? '+' : '-',
			npages << PAGE_SHIFT,
			mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&mm->mmap_sem);

	return ret;
}
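/*
 * Returns true if this mm has at least one preregistered region.
 */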
bool mm_iommu_preregistered(struct mm_struct *mm)
{
	return !list_empty(&mm->context.iommu_group_mem_list);
}
EXPORT_SYMBOL_GPL(mm_iommu_preregistered);
/*
 * Taken from alloc_migrate_target with changes to remove CMA allocations
 */
struct page *new_iommu_non_cma_page(struct page *page, unsigned long private)
{
	gfp_t gfp_mask = GFP_USER;
	struct page *new_page;

	if (PageCompound(page))
		return NULL;

	if (PageHighMem(page))
		gfp_mask |= __GFP_HIGHMEM;

	/*
	 * We don't want the allocation to force an OOM if possible
	 */
	new_page = alloc_page(gfp_mask | __GFP_NORETRY | __GFP_NOWARN);
	return new_page;
}
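/*
 * Migrate a single base page out of the CMA area before pinning it
 * long-term. A migration failure is not fatal: the function still
 * returns 0 and the page gets pinned wherever it currently sits.
 */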
static int mm_iommu_move_page_from_cma(struct page *page)
{
	int ret = 0;
	LIST_HEAD(cma_migrate_pages);

	/* Ignore huge pages for now */
	if (PageCompound(page))
		return -EBUSY;

	lru_add_drain();
	ret = isolate_lru_page(page);
	if (ret)
		return ret;

	list_add(&page->lru, &cma_migrate_pages);
	put_page(page); /* Drop the gup reference */

	ret = migrate_pages(&cma_migrate_pages, new_iommu_non_cma_page,
				NULL, 0, MIGRATE_SYNC, MR_CONTIG_RANGE);
	if (ret) {
		if (!list_empty(&cma_migrate_pages))
			putback_movable_pages(&cma_migrate_pages);
	}

	return 0;
}
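/*
 * Preregister @entries pages of userspace memory starting at @ua: pin
 * every page, record its host physical address in hpas[] and add the
 * region to the per-mm list. Registering the same region again only
 * bumps its use count; a partial overlap with an existing region fails
 * with -EINVAL.
 *
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	struct mm_iommu_table_group_mem_t *mem;
 *
 *	if (!mm_iommu_get(current->mm, ua, size >> PAGE_SHIFT, &mem)) {
 *		... translate addresses with mm_iommu_ua_to_hpa() ...
 *		mm_iommu_put(current->mm, mem);
 *	}
 */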
long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
		struct mm_iommu_table_group_mem_t **pmem)
{
	struct mm_iommu_table_group_mem_t *mem;
	long i, j, ret = 0, locked_entries = 0;
	unsigned int pageshift;
	unsigned long flags;
	unsigned long cur_ua;
	struct page *page = NULL;

	mutex_lock(&mem_list_mutex);

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			++mem->used;
			*pmem = mem;
			goto unlock_exit;
		}

		/* Overlap? */
		if ((mem->ua < (ua + (entries << PAGE_SHIFT))) &&
				(ua < (mem->ua +
				       (mem->entries << PAGE_SHIFT)))) {
			ret = -EINVAL;
			goto unlock_exit;
		}
	}

	ret = mm_iommu_adjust_locked_vm(mm, entries, true);
	if (ret)
		goto unlock_exit;

	locked_entries = entries;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	/*
	 * For a starting point for a maximum page size calculation
	 * we use @ua and @entries natural alignment to allow IOMMU pages
	 * smaller than huge pages but still bigger than PAGE_SIZE.
	 */
	mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
	mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
	if (!mem->hpas) {
		kfree(mem);
		ret = -ENOMEM;
		goto unlock_exit;
	}

	for (i = 0; i < entries; ++i) {
		cur_ua = ua + (i << PAGE_SHIFT);
		if (1 != get_user_pages_fast(cur_ua,
					1/* pages */, 1/* iswrite */, &page)) {
			ret = -EFAULT;
			for (j = 0; j < i; ++j)
				put_page(pfn_to_page(mem->hpas[j] >>
						PAGE_SHIFT));
			vfree(mem->hpas);
			kfree(mem);
			goto unlock_exit;
		}
		/*
		 * If we get a page from the CMA zone, since we are going to
		 * be pinning these entries, we might as well move them out
		 * of the CMA zone if possible. NOTE: faulting in + migration
		 * can be expensive. Batching can be considered later
		 */
		if (is_migrate_cma_page(page)) {
			if (mm_iommu_move_page_from_cma(page))
				goto populate;
			if (1 != get_user_pages_fast(cur_ua,
						1/* pages */, 1/* iswrite */,
						&page)) {
				ret = -EFAULT;
				for (j = 0; j < i; ++j)
					put_page(pfn_to_page(mem->hpas[j] >>
								PAGE_SHIFT));
				vfree(mem->hpas);
				kfree(mem);
				goto unlock_exit;
			}
		}
populate:
		pageshift = PAGE_SHIFT;
		if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
			pte_t *pte;
			struct page *head = compound_head(page);
			unsigned int compshift = compound_order(head);
			unsigned int pteshift;

			local_irq_save(flags); /* disables as well */
			pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);

			/* Double check it is still the same pinned page */
			if (pte && pte_page(*pte) == head &&
			    pteshift == compshift + PAGE_SHIFT)
				pageshift = max_t(unsigned int, pteshift,
						PAGE_SHIFT);
			local_irq_restore(flags);
		}
		mem->pageshift = min(mem->pageshift, pageshift);
		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
	}

	atomic64_set(&mem->mapped, 1);
	mem->used = 1;
	mem->ua = ua;
	mem->entries = entries;
	*pmem = mem;

	list_add_rcu(&mem->next, &mm->context.iommu_group_mem_list);

unlock_exit:
	if (locked_entries && ret)
		mm_iommu_adjust_locked_vm(mm, locked_entries, false);

	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_get);
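/*
 * Release the page references taken by mm_iommu_get(), transferring the
 * dirty bit stashed in the low bits of hpas[] back to the struct page.
 */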
static void mm_iommu_unpin(struct mm_iommu_table_group_mem_t *mem)
{
	long i;
	struct page *page = NULL;

	for (i = 0; i < mem->entries; ++i) {
		if (!mem->hpas[i])
			continue;

		page = pfn_to_page(mem->hpas[i] >> PAGE_SHIFT);
		if (!page)
			continue;

		if (mem->hpas[i] & MM_IOMMU_TABLE_GROUP_PAGE_DIRTY)
			SetPageDirty(page);

		put_page(page);
		mem->hpas[i] = 0;
	}
}
static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
{
	mm_iommu_unpin(mem);
	vfree(mem->hpas);
	kfree(mem);
}

static void mm_iommu_free(struct rcu_head *head)
{
	struct mm_iommu_table_group_mem_t *mem = container_of(head,
			struct mm_iommu_table_group_mem_t, rcu);

	mm_iommu_do_free(mem);
}

static void mm_iommu_release(struct mm_iommu_table_group_mem_t *mem)
{
	list_del_rcu(&mem->next);
	call_rcu(&mem->rcu, mm_iommu_free);
}
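/*
 * Drop one reference on a preregistered region. The last reference unpins
 * the pages and frees the descriptor via RCU, unless the region is still
 * mapped into an IOMMU table, in which case -EBUSY is returned and the
 * reference is restored.
 */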
long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
{
	long ret = 0;

	mutex_lock(&mem_list_mutex);

	if (mem->used == 0) {
		ret = -ENOENT;
		goto unlock_exit;
	}

	--mem->used;
	/* There are still users, exit */
	if (mem->used)
		goto unlock_exit;

	/* Are there still mappings? */
	if (atomic64_cmpxchg(&mem->mapped, 1, 0) != 1) {
		++mem->used;
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* @mapped became 0 so now mappings are disabled, release the region */
	mm_iommu_release(mem);

	mm_iommu_adjust_locked_vm(mm, mem->entries, false);

unlock_exit:
	mutex_unlock(&mem_list_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_put);
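/*
 * Find the preregistered region that fully covers [ua, ua + size), if any.
 */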
struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_lookup);
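/*
 * Real mode variant of mm_iommu_lookup() which walks the list locklessly
 * instead of under the RCU read lock.
 */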
struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(struct mm_struct *mm,
		unsigned long ua, unsigned long size)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_lockless(mem, &mm->context.iommu_group_mem_list,
			next) {
		if ((mem->ua <= ua) &&
				(ua + size <= mem->ua +
				 (mem->entries << PAGE_SHIFT))) {
			ret = mem;
			break;
		}
	}

	return ret;
}
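/*
 * Find a region by its exact starting address and number of entries.
 */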
struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
		unsigned long ua, unsigned long entries)
{
	struct mm_iommu_table_group_mem_t *mem, *ret = NULL;

	list_for_each_entry_rcu(mem, &mm->context.iommu_group_mem_list, next) {
		if ((mem->ua == ua) && (mem->entries == entries)) {
			ret = mem;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(mm_iommu_find);
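/*
 * Translate the userspace address @ua into the host physical address of
 * the pinned page, failing if @ua lies outside the region or if the
 * caller asks for a page size larger than the region can back.
 *
 * For example (hypothetical values), with mem->ua = 0x10000000 and
 * PAGE_SHIFT = 16, ua = 0x10021234 resolves to entry 2 and *hpa becomes
 * (hpas[2] & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | 0x1234.
 */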
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	u64 *va = &mem->hpas[entry];

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	*hpa = (*va & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
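/*
 * Real mode variant of mm_iommu_ua_to_hpa(): hpas[] is vmalloc'ed, so the
 * entry is reached through vmalloc_to_phys() rather than dereferenced via
 * its virtual address.
 */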
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
		unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
	const long entry = (ua - mem->ua) >> PAGE_SHIFT;
	void *va = &mem->hpas[entry];
	unsigned long *pa;

	if (entry >= mem->entries)
		return -EFAULT;

	if (pageshift > mem->pageshift)
		return -EFAULT;

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return -EFAULT;

	*hpa = (*pa & MM_IOMMU_TABLE_GROUP_PAGE_MASK) | (ua & ~PAGE_MASK);

	return 0;
}
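/*
 * Mark the page backing @ua dirty by setting the dirty flag in the low
 * bits of its hpas[] entry; called from real mode, hence the
 * vmalloc_to_phys() access.
 */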
extern void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
{
	struct mm_iommu_table_group_mem_t *mem;
	long entry;
	void *va;
	unsigned long *pa;

	mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);
	if (!mem)
		return;

	entry = (ua - mem->ua) >> PAGE_SHIFT;
	va = &mem->hpas[entry];

	pa = (void *) vmalloc_to_phys(va);
	if (!pa)
		return;

	*pa |= MM_IOMMU_TABLE_GROUP_PAGE_DIRTY;
}
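/*
 * Mapping counter helpers: mm_iommu_mapped_inc() takes a reference while
 * the region is mapped into an IOMMU table and fails with -ENXIO once the
 * last mm_iommu_put() has dropped @mapped to zero; mm_iommu_mapped_dec()
 * drops it again but never takes the counter from one to zero (only the
 * final mm_iommu_put() does that).
 */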
long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem)
{
	if (atomic64_inc_not_zero(&mem->mapped))
		return 0;

	/* Last mm_iommu_put() has been called, no more mappings allowed */
	return -ENXIO;
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_inc);

void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem)
{
	atomic64_add_unless(&mem->mapped, -1, 1);
}
EXPORT_SYMBOL_GPL(mm_iommu_mapped_dec);
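/*
 * Initialise the (initially empty) list of preregistered regions for a
 * new mm context.
 */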
void mm_iommu_init(struct mm_struct *mm)
{
	INIT_LIST_HEAD_RCU(&mm->context.iommu_group_mem_list);
}