vfio_iommu_spapr_tce.c

/*
 * VFIO: IOMMU DMA mapping support for TCE on POWER
 *
 * Copyright (C) 2013 IBM Corp.  All rights reserved.
 *     Author: Alexey Kardashevskiy <aik@ozlabs.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio_iommu_type1.c:
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/err.h>
#include <linux/vfio.h>
#include <linux/vmalloc.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

#define DRIVER_VERSION	"0.1"
#define DRIVER_AUTHOR	"aik@ozlabs.ru"
#define DRIVER_DESC	"VFIO IOMMU SPAPR TCE"

static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group);
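
/*
 * Pages pinned for DMA are accounted against the current process's
 * RLIMIT_MEMLOCK via mm->locked_vm.  These two helpers take and release
 * that quota; callers pass page counts, not bytes.
 */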
static long try_increment_locked_vm(long npages)
{
	long ret = 0, locked, lock_limit;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if (!npages)
		return 0;

	down_write(&current->mm->mmap_sem);
	locked = current->mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		current->mm->locked_vm += npages;

	pr_debug("[%d] RLIMIT_MEMLOCK +%ld %ld/%ld%s\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}

static void decrement_locked_vm(long npages)
{
	if (!current || !current->mm || !npages)
		return; /* process exited */

	down_write(&current->mm->mmap_sem);
	if (WARN_ON_ONCE(npages > current->mm->locked_vm))
		npages = current->mm->locked_vm;
	current->mm->locked_vm -= npages;
	pr_debug("[%d] RLIMIT_MEMLOCK -%ld %ld/%ld\n", current->pid,
			npages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK));
	up_write(&current->mm->mmap_sem);
}

/*
 * VFIO IOMMU fd for SPAPR_TCE IOMMU implementation
 *
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */

struct tce_iommu_group {
	struct list_head next;
	struct iommu_group *grp;
};

/*
 * The container descriptor supports only a single group per container.
 * Required by the API as the container is not supplied with the IOMMU group
 * at the moment of initialization.
 */
struct tce_container {
	struct mutex lock;
	bool enabled;
	bool v2;
	unsigned long locked_pages;
	struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
	struct list_head group_list;
};
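
/*
 * v2 containers pre-register userspace memory with mm_iommu_get() so that
 * pages are pinned once up front; later map/unmap requests only translate
 * addresses within already-registered regions.
 */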
static long tce_iommu_unregister_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	struct mm_iommu_table_group_mem_t *mem;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK))
		return -EINVAL;

	mem = mm_iommu_find(vaddr, size >> PAGE_SHIFT);
	if (!mem)
		return -ENOENT;

	return mm_iommu_put(mem);
}

static long tce_iommu_register_pages(struct tce_container *container,
		__u64 vaddr, __u64 size)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem = NULL;
	unsigned long entries = size >> PAGE_SHIFT;

	if ((vaddr & ~PAGE_MASK) || (size & ~PAGE_MASK) ||
			((vaddr + size) < vaddr))
		return -EINVAL;

	ret = mm_iommu_get(vaddr, entries, &mem);
	if (ret)
		return ret;

	container->enabled = true;

	return 0;
}
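
/*
 * The "userspace view" is a per-table array recording the userspace address
 * mapped into each TCE entry; v2 needs it to find the preregistered region
 * again when an entry is cleared.  Its memory is charged to locked_vm too.
 */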
static long tce_iommu_userspace_view_alloc(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);
	unsigned long *uas;
	long ret;

	BUG_ON(tbl->it_userspace);

	ret = try_increment_locked_vm(cb >> PAGE_SHIFT);
	if (ret)
		return ret;

	uas = vzalloc(cb);
	if (!uas) {
		decrement_locked_vm(cb >> PAGE_SHIFT);
		return -ENOMEM;
	}
	tbl->it_userspace = uas;

	return 0;
}

static void tce_iommu_userspace_view_free(struct iommu_table *tbl)
{
	unsigned long cb = _ALIGN_UP(sizeof(tbl->it_userspace[0]) *
			tbl->it_size, PAGE_SIZE);

	if (!tbl->it_userspace)
		return;

	vfree(tbl->it_userspace);
	tbl->it_userspace = NULL;
	decrement_locked_vm(cb >> PAGE_SHIFT);
}
static bool tce_page_is_contained(struct page *page, unsigned page_shift)
{
	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
	 * a bigger memory chunk than it should.
	 */
	return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
}
static inline bool tce_groups_attached(struct tce_container *container)
{
	return !list_empty(&container->group_list);
}

static long tce_iommu_find_table(struct tce_container *container,
		phys_addr_t ioba, struct iommu_table **ptbl)
{
	long i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (tbl) {
			unsigned long entry = ioba >> tbl->it_page_shift;
			unsigned long start = tbl->it_offset;
			unsigned long end = start + tbl->it_size;

			if ((start <= entry) && (entry < end)) {
				*ptbl = tbl;
				return i;
			}
		}
	}

	return -1;
}

static int tce_iommu_find_free_table(struct tce_container *container)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (!container->tables[i])
			return i;
	}

	return -ENOSPC;
}
static int tce_iommu_enable(struct tce_container *container)
{
	int ret = 0;
	unsigned long locked;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp;

	if (!current->mm)
		return -ESRCH; /* process exited */

	if (container->enabled)
		return -EBUSY;

	/*
	 * When userspace pages are mapped into the IOMMU, they are effectively
	 * locked memory, so, theoretically, we need to update the accounting
	 * of locked pages on each map and unmap.  For powerpc, the map/unmap
	 * paths can be very hot, though, and the accounting would kill
	 * performance, especially since it would be difficult or impossible
	 * to handle the accounting in real mode only.
	 *
	 * To address that, rather than precisely accounting every page, we
	 * instead account for a worst case on locked memory when the iommu is
	 * enabled and disabled.  The worst case upper bound on locked memory
	 * is the size of the whole iommu window, which is usually relatively
	 * small (compared to total memory sizes) on POWER hardware.
	 *
	 * Also, we don't have a nice way to fail on H_PUT_TCE due to ulimits;
	 * that would effectively kill the guest at random points, so it is
	 * much better to enforce the limit based on the maximum that the
	 * guest can map.
	 *
	 * Unfortunately at the moment it counts whole tables, no matter how
	 * much memory the guest has.  I.e. for a 4GB guest and 4 IOMMU groups,
	 * each with a 2GB DMA window, 8GB will be counted here.  The reason
	 * for this is that we cannot tell here the amount of RAM used by the
	 * guest as this information is only available from KVM and VFIO is
	 * KVM agnostic.
	 *
	 * So we do not allow enabling a container without a group attached
	 * as there is no way to know how much we should increment
	 * the locked_vm counter.
	 */
	if (!tce_groups_attached(container))
		return -ENODEV;

	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -ENODEV;

	if (!table_group->tce32_size)
		return -EPERM;

	locked = table_group->tce32_size >> PAGE_SHIFT;
	ret = try_increment_locked_vm(locked);
	if (ret)
		return ret;

	container->locked_pages = locked;

	container->enabled = true;

	return ret;
}
static void tce_iommu_disable(struct tce_container *container)
{
	if (!container->enabled)
		return;

	container->enabled = false;

	if (!current->mm)
		return;

	decrement_locked_vm(container->locked_pages);
}

static void *tce_iommu_open(unsigned long arg)
{
	struct tce_container *container;

	if ((arg != VFIO_SPAPR_TCE_IOMMU) && (arg != VFIO_SPAPR_TCE_v2_IOMMU)) {
		pr_err("tce_vfio: Wrong IOMMU type\n");
		return ERR_PTR(-EINVAL);
	}

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	mutex_init(&container->lock);
	INIT_LIST_HEAD_RCU(&container->group_list);

	container->v2 = arg == VFIO_SPAPR_TCE_v2_IOMMU;

	return container;
}

static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
static void tce_iommu_free_table(struct iommu_table *tbl);

static void tce_iommu_release(void *iommu_data)
{
	struct tce_container *container = iommu_data;
	struct tce_iommu_group *tcegrp;
	long i;

	while (tce_groups_attached(container)) {
		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		tce_iommu_detach_group(iommu_data, tcegrp->grp);
	}

	/*
	 * If VFIO created a table, it was not disposed
	 * by tce_iommu_detach_group() so do it now.
	 */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_free_table(tbl);
	}

	tce_iommu_disable(container);
	mutex_destroy(&container->lock);

	kfree(container);
}

static void tce_iommu_unuse_page(struct tce_container *container,
		unsigned long hpa)
{
	struct page *page;

	page = pfn_to_page(hpa >> PAGE_SHIFT);
	put_page(page);
}
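
/*
 * Translates a userspace address within a preregistered region into the
 * host physical address cached at registration time.
 */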
static int tce_iommu_prereg_ua_to_hpa(unsigned long tce, unsigned long size,
		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
	long ret = 0;
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup(tce, size);
	if (!mem)
		return -EINVAL;

	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
	if (ret)
		return -EINVAL;

	*pmem = mem;

	return 0;
}

static void tce_iommu_unuse_page_v2(struct iommu_table *tbl,
		unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	int ret;
	unsigned long hpa = 0;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua || !current || !current->mm)
		return;

	ret = tce_iommu_prereg_ua_to_hpa(*pua, IOMMU_PAGE_SIZE(tbl),
			&hpa, &mem);
	if (ret)
		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
				__func__, *pua, entry, ret);

	if (mem)
		mm_iommu_mapped_dec(mem);

	*pua = 0;
}
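
/*
 * Clears @pages TCE entries starting from @entry and drops the reference on
 * each page that was mapped: put_page() for v1, or a decrement of the
 * preregistered region's mapped counter for v2.
 */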
static int tce_iommu_clear(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long pages)
{
	unsigned long oldhpa;
	long ret;
	enum dma_data_direction direction;

	for ( ; pages; --pages, ++entry) {
		direction = DMA_NONE;
		oldhpa = 0;
		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
		if (ret)
			continue;

		if (direction == DMA_NONE)
			continue;

		if (container->v2) {
			tce_iommu_unuse_page_v2(tbl, entry);
			continue;
		}

		tce_iommu_unuse_page(container, oldhpa);
	}

	return 0;
}

static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa)
{
	struct page *page = NULL;
	enum dma_data_direction direction = iommu_tce_direction(tce);

	if (get_user_pages_fast(tce & PAGE_MASK, 1,
			direction != DMA_TO_DEVICE, &page) != 1)
		return -EFAULT;

	*hpa = __pa((unsigned long) page_address(page));

	return 0;
}
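
/*
 * Pins @pages userspace pages starting at @tce and programs their physical
 * addresses into @tbl from @entry onwards.  On any failure, entries already
 * set by this call are cleared again.
 */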
static long tce_iommu_build(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		unsigned long offset = tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;

		ret = tce_iommu_use_page(tce, &hpa);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		hpa |= offset;
		dirtmp = direction;
		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			tce_iommu_unuse_page(container, hpa);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page(container, hpa);

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
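
/*
 * v2 counterpart of tce_iommu_build(): the pages must already be pinned via
 * preregistration, so instead of get_user_pages_fast() this looks up the
 * cached translation, bumps the region's mapped counter and records the
 * userspace address in the table's userspace view.
 */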
static long tce_iommu_build_v2(struct tce_container *container,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long tce, unsigned long pages,
		enum dma_data_direction direction)
{
	long i, ret = 0;
	struct page *page;
	unsigned long hpa;
	enum dma_data_direction dirtmp;

	for (i = 0; i < pages; ++i) {
		struct mm_iommu_table_group_mem_t *mem = NULL;
		unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
				entry + i);

		ret = tce_iommu_prereg_ua_to_hpa(tce, IOMMU_PAGE_SIZE(tbl),
				&hpa, &mem);
		if (ret)
			break;

		page = pfn_to_page(hpa >> PAGE_SHIFT);
		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
			ret = -EPERM;
			break;
		}

		/* Preserve offset within IOMMU page */
		hpa |= tce & IOMMU_PAGE_MASK(tbl) & ~PAGE_MASK;
		dirtmp = direction;

		/* The registered region is being unregistered */
		if (mm_iommu_mapped_inc(mem))
			break;

		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
		if (ret) {
			/* dirtmp cannot be DMA_NONE here */
			tce_iommu_unuse_page_v2(tbl, entry + i);
			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
					__func__, entry << tbl->it_page_shift,
					tce, ret);
			break;
		}

		if (dirtmp != DMA_NONE)
			tce_iommu_unuse_page_v2(tbl, entry + i);

		*pua = tce;

		tce += IOMMU_PAGE_SIZE(tbl);
	}

	if (ret)
		tce_iommu_clear(container, tbl, entry, i);

	return ret;
}
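
/*
 * Allocates a hardware TCE table via the platform ops and charges its memory
 * to locked_vm; for v2 containers the userspace view is allocated as well.
 */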
static long tce_iommu_create_table(struct tce_container *container,
		struct iommu_table_group *table_group,
		int num,
		__u32 page_shift,
		__u64 window_size,
		__u32 levels,
		struct iommu_table **ptbl)
{
	long ret, table_size;

	table_size = table_group->ops->get_table_size(page_shift, window_size,
			levels);
	if (!table_size)
		return -EINVAL;

	ret = try_increment_locked_vm(table_size >> PAGE_SHIFT);
	if (ret)
		return ret;

	ret = table_group->ops->create_table(table_group, num,
			page_shift, window_size, levels, ptbl);

	WARN_ON(!ret && !(*ptbl)->it_ops->free);
	WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));

	if (!ret && container->v2) {
		ret = tce_iommu_userspace_view_alloc(*ptbl);
		if (ret)
			(*ptbl)->it_ops->free(*ptbl);
	}

	if (ret)
		decrement_locked_vm(table_size >> PAGE_SHIFT);

	return ret;
}

static void tce_iommu_free_table(struct iommu_table *tbl)
{
	unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;

	tce_iommu_userspace_view_free(tbl);
	tbl->it_ops->free(tbl);
	decrement_locked_vm(pages);
}
static long tce_iommu_create_window(struct tce_container *container,
		__u32 page_shift, __u64 window_size, __u32 levels,
		__u64 *start_addr)
{
	struct tce_iommu_group *tcegrp;
	struct iommu_table_group *table_group;
	struct iommu_table *tbl = NULL;
	long ret, num;

	num = tce_iommu_find_free_table(container);
	if (num < 0)
		return num;

	/* Get the first group for ops::create_table */
	tcegrp = list_first_entry(&container->group_list,
			struct tce_iommu_group, next);
	table_group = iommu_group_get_iommudata(tcegrp->grp);
	if (!table_group)
		return -EFAULT;

	if (!(table_group->pgsizes & (1ULL << page_shift)))
		return -EINVAL;

	if (!table_group->ops->set_window || !table_group->ops->unset_window ||
			!table_group->ops->get_table_size ||
			!table_group->ops->create_table)
		return -EPERM;

	/* Create TCE table */
	ret = tce_iommu_create_table(container, table_group, num,
			page_shift, window_size, levels, &tbl);
	if (ret)
		return ret;

	BUG_ON(!tbl->it_ops->free);

	/*
	 * Program the table to every group.
	 * Groups have been tested for compatibility at the attach time.
	 */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		ret = table_group->ops->set_window(table_group, num, tbl);
		if (ret)
			goto unset_exit;
	}

	container->tables[num] = tbl;

	/* Return start address assigned by platform in create_table() */
	*start_addr = tbl->it_offset << tbl->it_page_shift;

	return 0;

unset_exit:
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);
		table_group->ops->unset_window(table_group, num);
	}
	tce_iommu_free_table(tbl);

	return ret;
}
static long tce_iommu_remove_window(struct tce_container *container,
		__u64 start_addr)
{
	struct iommu_table_group *table_group = NULL;
	struct iommu_table *tbl;
	struct tce_iommu_group *tcegrp;
	int num;

	num = tce_iommu_find_table(container, start_addr, &tbl);
	if (num < 0)
		return -EINVAL;

	BUG_ON(!tbl->it_size);

	/* Detach groups from IOMMUs */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		/*
		 * SPAPR TCE IOMMU exposes the default DMA window to
		 * the guest via dma32_window_start/size of
		 * VFIO_IOMMU_SPAPR_TCE_GET_INFO. Some platforms allow
		 * userspace to remove this window, some do not, so
		 * here we check for the platform capability.
		 */
		if (!table_group->ops || !table_group->ops->unset_window)
			return -EPERM;

		table_group->ops->unset_window(table_group, num);
	}

	/* Free table */
	tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
	tce_iommu_free_table(tbl);
	container->tables[num] = NULL;

	return 0;
}
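
/*
 * A minimal sketch of how userspace typically drives these ioctls
 * (illustrative only, not part of this driver; error handling is omitted,
 * the group number "5" is assumed, and buf must be a page-aligned, mapped
 * buffer):
 *
 *	int container = open("/dev/vfio/vfio", O_RDWR);
 *	int group = open("/dev/vfio/5", O_RDWR);
 *	struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
 *	struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map) };
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_SPAPR_TCE_IOMMU);
 *	ioctl(container, VFIO_IOMMU_ENABLE);
 *	ioctl(container, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
 *
 *	map.vaddr = (__u64)(unsigned long)buf;
 *	map.size = 4096;
 *	map.iova = info.dma32_window_start;
 *	map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
 *	ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 */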
static long tce_iommu_ioctl(void *iommu_data,
		unsigned int cmd, unsigned long arg)
{
	struct tce_container *container = iommu_data;
	unsigned long minsz, ddwsz;
	long ret;

	switch (cmd) {
	case VFIO_CHECK_EXTENSION:
		switch (arg) {
		case VFIO_SPAPR_TCE_IOMMU:
		case VFIO_SPAPR_TCE_v2_IOMMU:
			ret = 1;
			break;
		default:
			ret = vfio_spapr_iommu_eeh_ioctl(NULL, cmd, arg);
			break;
		}

		return (ret < 0) ? 0 : ret;

	case VFIO_IOMMU_SPAPR_TCE_GET_INFO: {
		struct vfio_iommu_spapr_tce_info info;
		struct tce_iommu_group *tcegrp;
		struct iommu_table_group *table_group;

		if (!tce_groups_attached(container))
			return -ENXIO;

		tcegrp = list_first_entry(&container->group_list,
				struct tce_iommu_group, next);
		table_group = iommu_group_get_iommudata(tcegrp->grp);

		if (!table_group)
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_info,
				dma32_window_size);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.dma32_window_start = table_group->tce32_start;
		info.dma32_window_size = table_group->tce32_size;
		info.flags = 0;
		memset(&info.ddw, 0, sizeof(info.ddw));

		if (table_group->max_dynamic_windows_supported &&
				container->v2) {
			info.flags |= VFIO_IOMMU_SPAPR_INFO_DDW;
			info.ddw.pgsizes = table_group->pgsizes;
			info.ddw.max_dynamic_windows_supported =
				table_group->max_dynamic_windows_supported;
			info.ddw.levels = table_group->max_levels;
		}

		ddwsz = offsetofend(struct vfio_iommu_spapr_tce_info, ddw);

		if (info.argsz >= ddwsz)
			minsz = ddwsz;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_IOMMU_MAP_DMA: {
		struct vfio_iommu_type1_dma_map param;
		struct iommu_table *tbl = NULL;
		long num;
		enum dma_data_direction direction;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		if (param.flags & ~(VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE))
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if ((param.size & ~IOMMU_PAGE_MASK(tbl)) ||
				(param.vaddr & ~IOMMU_PAGE_MASK(tbl)))
			return -EINVAL;

		/* iova is checked by the IOMMU API */
		if (param.flags & VFIO_DMA_MAP_FLAG_READ) {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_BIDIRECTIONAL;
			else
				direction = DMA_TO_DEVICE;
		} else {
			if (param.flags & VFIO_DMA_MAP_FLAG_WRITE)
				direction = DMA_FROM_DEVICE;
			else
				return -EINVAL;
		}

		ret = iommu_tce_put_param_check(tbl, param.iova, param.vaddr);
		if (ret)
			return ret;

		if (container->v2)
			ret = tce_iommu_build_v2(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);
		else
			ret = tce_iommu_build(container, tbl,
					param.iova >> tbl->it_page_shift,
					param.vaddr,
					param.size >> tbl->it_page_shift,
					direction);

		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_UNMAP_DMA: {
		struct vfio_iommu_type1_dma_unmap param;
		struct iommu_table *tbl = NULL;
		long num;

		if (!container->enabled)
			return -EPERM;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		num = tce_iommu_find_table(container, param.iova, &tbl);
		if (num < 0)
			return -ENXIO;

		if (param.size & ~IOMMU_PAGE_MASK(tbl))
			return -EINVAL;

		ret = iommu_tce_clear_param_check(tbl, param.iova, 0,
				param.size >> tbl->it_page_shift);
		if (ret)
			return ret;

		ret = tce_iommu_clear(container, tbl,
				param.iova >> tbl->it_page_shift,
				param.size >> tbl->it_page_shift);
		iommu_flush_tce(tbl);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_REGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_register_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_SPAPR_UNREGISTER_MEMORY: {
		struct vfio_iommu_spapr_register_memory param;

		if (!container->v2)
			break;

		minsz = offsetofend(struct vfio_iommu_spapr_register_memory,
				size);

		if (copy_from_user(&param, (void __user *)arg, minsz))
			return -EFAULT;

		if (param.argsz < minsz)
			return -EINVAL;

		/* No flag is supported now */
		if (param.flags)
			return -EINVAL;

		mutex_lock(&container->lock);
		ret = tce_iommu_unregister_pages(container, param.vaddr,
				param.size);
		mutex_unlock(&container->lock);

		return ret;
	}
	case VFIO_IOMMU_ENABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		ret = tce_iommu_enable(container);
		mutex_unlock(&container->lock);
		return ret;

	case VFIO_IOMMU_DISABLE:
		if (container->v2)
			break;

		mutex_lock(&container->lock);
		tce_iommu_disable(container);
		mutex_unlock(&container->lock);
		return 0;

	case VFIO_EEH_PE_OP: {
		struct tce_iommu_group *tcegrp;

		ret = 0;
		list_for_each_entry(tcegrp, &container->group_list, next) {
			ret = vfio_spapr_iommu_eeh_ioctl(tcegrp->grp,
					cmd, arg);
			if (ret)
				return ret;
		}
		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_CREATE: {
		struct vfio_iommu_spapr_tce_create create;

		if (!container->v2)
			break;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_create,
				start_addr);

		if (copy_from_user(&create, (void __user *)arg, minsz))
			return -EFAULT;

		if (create.argsz < minsz)
			return -EINVAL;

		if (create.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_create_window(container, create.page_shift,
				create.window_size, create.levels,
				&create.start_addr);

		mutex_unlock(&container->lock);

		if (!ret && copy_to_user((void __user *)arg, &create, minsz))
			ret = -EFAULT;

		return ret;
	}
	case VFIO_IOMMU_SPAPR_TCE_REMOVE: {
		struct vfio_iommu_spapr_tce_remove remove;

		if (!container->v2)
			break;

		if (!tce_groups_attached(container))
			return -ENXIO;

		minsz = offsetofend(struct vfio_iommu_spapr_tce_remove,
				start_addr);

		if (copy_from_user(&remove, (void __user *)arg, minsz))
			return -EFAULT;

		if (remove.argsz < minsz)
			return -EINVAL;

		if (remove.flags)
			return -EINVAL;

		mutex_lock(&container->lock);

		ret = tce_iommu_remove_window(container, remove.start_addr);

		mutex_unlock(&container->lock);

		return ret;
	}
	}

	return -ENOTTY;
}
static void tce_iommu_release_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = container->tables[i];

		if (!tbl)
			continue;

		tce_iommu_clear(container, tbl, tbl->it_offset, tbl->it_size);
		tce_iommu_userspace_view_free(tbl);
		if (tbl->it_map)
			iommu_release_ownership(tbl);

		container->tables[i] = NULL;
	}
}

static int tce_iommu_take_ownership(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	int i, j, rc = 0;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbl = table_group->tables[i];

		if (!tbl || !tbl->it_map)
			continue;

		rc = tce_iommu_userspace_view_alloc(tbl);
		if (!rc)
			rc = iommu_take_ownership(tbl);

		if (rc) {
			for (j = 0; j < i; ++j)
				iommu_release_ownership(
						table_group->tables[j]);

			return rc;
		}
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		container->tables[i] = table_group->tables[i];

	return 0;
}
static void tce_iommu_release_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i;

	if (!table_group->ops->unset_window) {
		WARN_ON_ONCE(1);
		return;
	}

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);
}

static long tce_iommu_take_ownership_ddw(struct tce_container *container,
		struct iommu_table_group *table_group)
{
	long i, ret = 0;
	struct iommu_table *tbl = NULL;

	if (!table_group->ops->create_table || !table_group->ops->set_window ||
			!table_group->ops->release_ownership) {
		WARN_ON_ONCE(1);
		return -EFAULT;
	}

	table_group->ops->take_ownership(table_group);

	/*
	 * If this is the first group attached, check whether there is
	 * a default DMA window and create one if there is none, as
	 * userspace expects it to exist.
	 */
	if (!tce_groups_attached(container) && !container->tables[0]) {
		ret = tce_iommu_create_table(container,
				table_group,
				0, /* window number */
				IOMMU_PAGE_SHIFT_4K,
				table_group->tce32_size,
				1, /* default levels */
				&tbl);
		if (ret)
			goto release_exit;
		else
			container->tables[0] = tbl;
	}

	/* Set all windows to the new group */
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		tbl = container->tables[i];

		if (!tbl)
			continue;

		/* Set the default window to a new group */
		ret = table_group->ops->set_window(table_group, i, tbl);
		if (ret)
			goto release_exit;
	}

	return 0;

release_exit:
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i)
		table_group->ops->unset_window(table_group, i);

	table_group->ops->release_ownership(table_group);

	return ret;
}
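
/*
 * Attaching a group takes ownership of its tables away from the platform
 * DMA setup.  Groups whose table_group implements the ownership ops go
 * through the dynamic DMA window (DDW) path; otherwise the container takes
 * over the existing tables directly.
 */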
static int tce_iommu_attach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	int ret;
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	struct tce_iommu_group *tcegrp = NULL;

	mutex_lock(&container->lock);

	/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
			iommu_group_id(iommu_group), iommu_group); */
	table_group = iommu_group_get_iommudata(iommu_group);
	if (!table_group) {
		ret = -ENODEV;
		goto unlock_exit;
	}

	if (tce_groups_attached(container) && (!table_group->ops ||
			!table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)) {
		ret = -EBUSY;
		goto unlock_exit;
	}

	/* Check if new group has the same iommu_ops (i.e. compatible) */
	list_for_each_entry(tcegrp, &container->group_list, next) {
		struct iommu_table_group *table_group_tmp;

		if (tcegrp->grp == iommu_group) {
			pr_warn("tce_vfio: Group %d is already attached\n",
					iommu_group_id(iommu_group));
			ret = -EBUSY;
			goto unlock_exit;
		}
		table_group_tmp = iommu_group_get_iommudata(tcegrp->grp);
		if (table_group_tmp->ops->create_table !=
				table_group->ops->create_table) {
			pr_warn("tce_vfio: Group %d is incompatible with group %d\n",
					iommu_group_id(iommu_group),
					iommu_group_id(tcegrp->grp));
			ret = -EPERM;
			goto unlock_exit;
		}
	}

	tcegrp = kzalloc(sizeof(*tcegrp), GFP_KERNEL);
	if (!tcegrp) {
		ret = -ENOMEM;
		goto unlock_exit;
	}

	if (!table_group->ops || !table_group->ops->take_ownership ||
			!table_group->ops->release_ownership)
		ret = tce_iommu_take_ownership(container, table_group);
	else
		ret = tce_iommu_take_ownership_ddw(container, table_group);

	if (!ret) {
		tcegrp->grp = iommu_group;
		list_add(&tcegrp->next, &container->group_list);
	} else {
		/*
		 * Free only the tcegrp we allocated here; the earlier error
		 * paths reach unlock_exit with tcegrp pointing into
		 * group_list.
		 */
		kfree(tcegrp);
	}

unlock_exit:
	mutex_unlock(&container->lock);

	return ret;
}
static void tce_iommu_detach_group(void *iommu_data,
		struct iommu_group *iommu_group)
{
	struct tce_container *container = iommu_data;
	struct iommu_table_group *table_group;
	bool found = false;
	struct tce_iommu_group *tcegrp;

	mutex_lock(&container->lock);

	list_for_each_entry(tcegrp, &container->group_list, next) {
		if (tcegrp->grp == iommu_group) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_warn("tce_vfio: detaching unattached group #%u\n",
				iommu_group_id(iommu_group));
		goto unlock_exit;
	}

	list_del(&tcegrp->next);
	kfree(tcegrp);

	table_group = iommu_group_get_iommudata(iommu_group);
	BUG_ON(!table_group);

	if (!table_group->ops || !table_group->ops->release_ownership)
		tce_iommu_release_ownership(container, table_group);
	else
		tce_iommu_release_ownership_ddw(container, table_group);

unlock_exit:
	mutex_unlock(&container->lock);
}

const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
	.name		= "iommu-vfio-powerpc",
	.owner		= THIS_MODULE,
	.open		= tce_iommu_open,
	.release	= tce_iommu_release,
	.ioctl		= tce_iommu_ioctl,
	.attach_group	= tce_iommu_attach_group,
	.detach_group	= tce_iommu_detach_group,
};

static int __init tce_iommu_init(void)
{
	return vfio_register_iommu_driver(&tce_iommu_driver_ops);
}

static void __exit tce_iommu_cleanup(void)
{
	vfio_unregister_iommu_driver(&tce_iommu_driver_ops);
}

module_init(tce_iommu_init);
module_exit(tce_iommu_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);