book3s_64_vio.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>
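
/*
 * Number of host pages needed to hold the guest-visible TCE table for
 * @iommu_pages IOMMU entries, at one u64 per entry.
 */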
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
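
/*
 * Total number of pages to charge against RLIMIT_MEMLOCK for one table:
 * the TCE pages themselves plus the kvmppc_spapr_tce_table descriptor
 * with its trailing array of page pointers.
 */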
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}
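
/*
 * Charges (inc == true) or uncharges (inc == false) @stt_pages against
 * the current process's RLIMIT_MEMLOCK via mm->locked_vm; fails with
 * -ENOMEM when the limit would be exceeded and the task lacks
 * CAP_IPC_LOCK.
 */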
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
	long ret = 0;

	if (!current || !current->mm)
		return ret; /* process exited */

	down_write(&current->mm->mmap_sem);

	if (inc) {
		unsigned long locked, lock_limit;

		locked = current->mm->locked_vm + stt_pages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			current->mm->locked_vm += stt_pages;
	} else {
		if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
			stt_pages = current->mm->locked_vm;

		current->mm->locked_vm -= stt_pages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
			inc ? '+' : '-',
			stt_pages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}
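
/*
 * Release path for a KVM<->IOMMU table link: the kref release callback
 * unhooks the entry from the list and defers the actual freeing to an
 * RCU grace period so lockless readers can still walk stt->iommu_tables.
 */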
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}
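
/*
 * Called when an IOMMU group is detached from the VM: finds the hardware
 * table of @grp that is linked to one of the guest's TCE tables and drops
 * the KVM reference on it.
 */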
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
				return;
			}
		}
	}
}
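
/*
 * Links an in-kernel TCE table (identified by @tablefd, the fd returned
 * by kvm_vm_ioctl_create_spapr_tce() below) with a hardware IOMMU table
 * of @grp so that H_PUT_TCE and friends update real DMA mappings. The
 * DMA window parameters of the two tables must be compatible.
 */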
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift ==
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		return 0;
	}

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}
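
/* RCU callback freeing the backing pages and the table descriptor. */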
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		__free_page(stt->pages[i]);

	kfree(stt);
}
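
/*
 * mmap support: userspace maps the TCE table pages directly, so the
 * fault handler simply hands back the page backing the faulting offset.
 */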
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}
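
/*
 * Final fput() of the table fd: unlists the table, drops every linked
 * IOMMU table, returns the locked-page accounting and frees the table
 * after an RCU grace period.
 */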
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	kvm_put_kvm(stt->kvm);

	kvmppc_account_memlimit(
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};
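
/*
 * Handler for KVM_CREATE_SPAPR_TCE{,_64}: allocates the guest-visible
 * TCE table, charges it against RLIMIT_MEMLOCK and returns an anonymous
 * fd which userspace can mmap and must close to destroy the table.
 */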
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	unsigned long npages, size;
	int ret = -ENOMEM;
	int i;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	size = _ALIGN_UP(args->size, PAGE_SIZE >> 3);
	npages = kvmppc_tce_pages(size);
	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0) {
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
		kvm_get_kvm(kvm);
	}

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

fail:
	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
fail_acct:
	kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
	return ret;
}
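
/* Resets one hardware TCE to an empty (DMA_NONE) state. */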
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg(tbl, entry, &hpa, &dir);
}
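
/*
 * Drops the "mapped" reference of the preregistered memory chunk that
 * backs @entry and clears the stored userspace address.
 */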
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, *pua, pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = 0;

	return H_SUCCESS;
}
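
/*
 * Clears one hardware TCE; if it was mapped, also releases the reference
 * on the preregistered memory, restoring the TCE on failure.
 */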
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
		return H_HARDWARE;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg(tbl, entry, &hpa, &dir);

	return ret;
}
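
/*
 * Unmaps all hardware (IOMMU page sized) entries covered by one guest
 * (stt->page_shift sized) TCE entry.
 */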
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
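
/*
 * Maps one hardware TCE at @entry to the host page backing guest
 * userspace address @ua, taking a "mapped" reference on the
 * preregistered memory chunk. H_TOO_HARD asks the caller to fall back
 * to a slower path.
 */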
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa, *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
		return H_HARDWARE;

	if (mm_iommu_mapped_inc(mem))
		return H_CLOSED;

	ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_HARDWARE;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = ua;

	return 0;
}
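
/*
 * Maps all hardware (IOMMU page sized) entries covered by one guest
 * TCE entry, stopping at the first failure.
 */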
static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
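
/*
 * Virtual mode handler for the H_PUT_TCE hcall: validates the request,
 * updates every attached hardware IOMMU table and finally stores the
 * TCE into the guest-visible table.
 */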
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			goto unlock_exit;

		WARN_ON_ONCE(1);
		kvmppc_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
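
/*
 * Virtual mode handler for H_PUT_TCE_INDIRECT: reads up to 512 TCEs
 * from a guest page at @tce_list and applies each of them as
 * H_PUT_TCE would.
 */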
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * SPAPR spec says that the maximum size of the list is 512 TCEs
	 * so the whole table fits in 4K page
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
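
/*
 * Virtual mode handler for H_STUFF_TCE: writes @npages copies of
 * @tce_value (which must carry no permission bits, i.e. unmap)
 * starting at @ioba.
 */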
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);