book3s_64_vio.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>
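
/*
 * Number of host pages needed to back a guest TCE table of @iommu_pages
 * entries, one u64 per entry.
 */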
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
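
/*
 * Total locked-memory footprint of one TCE table, in pages: the TCE pages
 * themselves plus the kvmppc_spapr_tce_table descriptor and its page
 * pointer array.
 */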
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}
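
/*
 * Charge (@inc == true) or uncharge @stt_pages against the owning process'
 * RLIMIT_MEMLOCK.  Returns -ENOMEM if the increment would exceed the limit
 * and the caller lacks CAP_IPC_LOCK.
 */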
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
        long ret = 0;

        if (!current || !current->mm)
                return ret; /* process exited */

        down_write(&current->mm->mmap_sem);

        if (inc) {
                unsigned long locked, lock_limit;

                locked = current->mm->locked_vm + stt_pages;
                lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        ret = -ENOMEM;
                else
                        current->mm->locked_vm += stt_pages;
        } else {
                if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
                        stt_pages = current->mm->locked_vm;

                current->mm->locked_vm -= stt_pages;
        }

        pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
                        inc ? '+' : '-',
                        stt_pages << PAGE_SHIFT,
                        current->mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK),
                        ret ? " - exceeded" : "");

        up_write(&current->mm->mmap_sem);

        return ret;
}
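
/*
 * RCU callback: drop the iommu_table reference held by a
 * kvmppc_spapr_tce_iommu_table and free the structure itself.
 */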
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);

        kfree(stit);
}

static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}
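
/*
 * Called when an IOMMU group is detached from the VM: drop the KVM reference
 * on the hardware table that was attached through this group, if any.
 */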
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        int i;
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                                return;
                        }
                }
        }
}
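
/*
 * Attach a hardware iommu_table behind a guest TCE table (identified by the
 * file descriptor returned by KVM_CREATE_SPAPR_TCE_64), so that H_PUT_TCE and
 * friends update the real table as well.  The hardware window geometry must
 * be compatible with the guest view of the table.
 */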
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        bool found = false;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        long i;
        struct kvmppc_spapr_tce_iommu_table *stit;
        struct fd f;

        f = fdget(tablefd);
        if (!f.file)
                return -EBADF;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {
                        found = true;
                        break;
                }
        }

        fdput(f);

        if (!found)
                return -EINVAL;

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))
                return -EFAULT;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                if (!tbltmp)
                        continue;
                /* Make sure hardware table parameters are compatible */
                if ((tbltmp->it_page_shift <= stt->page_shift) &&
                                (tbltmp->it_offset << tbltmp->it_page_shift ==
                                 stt->offset << stt->page_shift) &&
                                (tbltmp->it_size << tbltmp->it_page_shift >=
                                 stt->size << stt->page_shift)) {
                        /*
                         * Reference the table to avoid races with
                         * add/remove DMA windows.
                         */
                        tbl = iommu_tce_table_get(tbltmp);
                        break;
                }
        }
        if (!tbl)
                return -EINVAL;

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)
                        continue;

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);
                        return -ENOTTY;
                }
                /*
                 * The table is already known to this KVM, we just increased
                 * its KVM reference counter and can return.
                 */
                return 0;
        }

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);
        if (!stit) {
                iommu_tce_table_put(tbl);
                return -ENOMEM;
        }

        stit->tbl = tbl;
        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);

        return 0;
}

static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                __free_page(stt->pages[i]);

        kfree(stt);
}
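
/*
 * Page fault handler for userspace mappings of the TCE table fd: hand back
 * the backing page for the faulting offset.
 */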
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = stt->pages[vmf->pgoff];
        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (1) {
                        if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                                break;
                }
        }

        kvm_put_kvm(stt->kvm);

        kvmppc_account_memlimit(
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap = kvm_spapr_tce_mmap,
        .release = kvm_spapr_tce_release,
};
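
/*
 * Create a guest TCE table (the KVM_CREATE_SPAPR_TCE_64 VM ioctl): account
 * the table against RLIMIT_MEMLOCK, allocate the backing pages, check that
 * the LIOBN is not already in use, and return an anonymous fd that userspace
 * can mmap() to read the table and whose release tears everything down.
 *
 * A minimal userspace sketch, assuming vm_fd is an open VM descriptor and
 * KVM_CAP_SPAPR_TCE_64 is available (the values below are only examples):
 *
 *      struct kvm_create_spapr_tce_64 args = {
 *              .liobn = 0x80000000,
 *              .page_shift = 12,       // 4K IOMMU pages
 *              .offset = 0,            // window starts at IOBA 0
 *              .size = 1ULL << 20,     // number of TCEs in the window
 *      };
 *      int tablefd = ioctl(vm_fd, KVM_CREATE_SPAPR_TCE_64, &args);
 */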
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        unsigned long npages, size = args->size;
        int ret = -ENOMEM;
        int i;

        if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
                (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
                return -EINVAL;

        npages = kvmppc_tce_pages(size);
        ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                      GFP_KERNEL);
        if (!stt)
                goto fail_acct;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        for (i = 0; i < npages; i++) {
                stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!stt->pages[i])
                        goto fail;
        }

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        ret = 0;
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {
                        ret = -EBUSY;
                        break;
                }
        }

        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                       stt, O_RDWR | O_CLOEXEC);

        if (ret >= 0) {
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
                kvm_get_kvm(kvm);
        }

        mutex_unlock(&kvm->lock);

        if (ret >= 0)
                return ret;

fail:
        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
fail_acct:
        kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
        return ret;
}
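
/*
 * Validate a guest-supplied TCE value before it is committed: check GPA
 * alignment for the table's page size and, for every attached hardware
 * table, make sure the target page is covered by a preregistered
 * (mm_iommu) memory region.
 */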
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_TOO_HARD;

        if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
                return H_TOO_HARD;

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem)
                        return H_TOO_HARD;

                if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
                        return H_TOO_HARD;
        }

        return H_SUCCESS;
}
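
/* Reset a hardware TCE entry to "no mapping" (DMA_NONE). */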
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg(tbl, entry, &hpa, &dir);
}

static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

        if (!pua)
                return H_SUCCESS;

        mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg(tbl, entry, &hpa, &dir);

        return ret;
}
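
/*
 * Unmap one guest TCE entry from a hardware table, walking every hardware
 * (IOMMU page sized) sub-entry covered by the guest page.
 */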
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}
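
/*
 * Map a single hardware TCE entry: translate the guest userspace address to
 * a host physical address via the preregistered memory list, bump the
 * region's mapped counter and program the entry with iommu_tce_xchg().
 */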
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
                return H_TOO_HARD;

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_TOO_HARD;

        if (mm_iommu_mapped_inc(mem))
                return H_TOO_HARD;

        ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}
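
/*
 * Map one guest TCE entry, iterating over all hardware sub-entries when the
 * guest page size is larger than the IOMMU page size.
 */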
static long kvmppc_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}
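
/*
 * Virtual-mode handler for the H_PUT_TCE hcall: validate the request, update
 * every attached hardware table and finally store the TCE value in the
 * guest-visible table.
 */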
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret, idx;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        dir = iommu_tce_direction(tce);

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                        entry, ua, dir);

                if (ret != H_SUCCESS) {
                        kvmppc_clear_tce(stit->tbl, entry);
                        goto unlock_exit;
                }
        }

        kvmppc_tce_put(stt, entry, tce);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
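
/*
 * Virtual-mode handler for H_PUT_TCE_INDIRECT: read a list of up to 512 TCEs
 * from a 4K-aligned guest page and apply each one as H_PUT_TCE would.
 */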
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * SPAPR spec says that the maximum size of the list is 512 TCEs
         * so the whole table fits in 4K page
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                /*
                 * This looks unsafe, because we validate, then regrab
                 * the TCE from userspace which could have been changed by
                 * another thread.
                 *
                 * But it actually is safe, because the relevant checks will be
                 * re-executed in the following code.  If userspace tries to
                 * change this dodgily it will result in a messier failure mode
                 * but won't threaten the host.
                 */
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);
                if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
                        /* Do not leak the SRCU read lock on the error path */
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }
                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret != H_SUCCESS) {
                                kvmppc_clear_tce(stit->tbl, entry + i);
                                goto unlock_exit;
                        }
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
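
/*
 * Virtual-mode handler for H_STUFF_TCE: clear @npages consecutive entries,
 * writing @tce_value (which must have no permission bits set) to each.
 */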
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only to allow userspace poison TCE for debug */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE(1);
                        kvmppc_clear_tce(stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);