book3s_64_vio_hv.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>
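
/*
 * Editor's note (not authoritative): WARN_ON_ONCE_RM below is a real-mode
 * variant of WARN_ON_ONCE(). It avoids the generic WARN machinery, which is
 * presumably not usable while running in real mode, and instead only uses
 * pr_err() and dump_stack(), firing at most once per call site.
 */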

#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif

#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
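
/*
 * Each TCE is a u64, so for example with 4K host pages TCES_PER_PAGE is
 * 4096 / 8 = 512, and with 64K host pages it is 65536 / 8 = 8192.
 */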

/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 * mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Validates a TCE address.
 * At the moment only the flags and the page mask are validated.
 * As the host kernel does not access those addresses (it just puts them
 * into the table and user space is expected to process them), we can skip
 * checking other things (such as whether the TCE is a guest RAM address or
 * whether the page was actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison the TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
		return H_TOO_HARD;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
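
/*
 * For illustration: a TCE combines a guest physical address with permission
 * bits (TCE_PCI_READ and TCE_PCI_WRITE from asm/tce.h) in the low bits.
 * Assuming TCE_PCI_READ | TCE_PCI_WRITE == 0x3, a hypothetical value
 * tce = 0x10000003 requests a bidirectional (read + write) mapping of the
 * guest page at 0x10000000, while a tce with both permission bits clear
 * translates to DMA_NONE, i.e. a cleared ("poisoned") entry.
 */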

/*
 * Note on the use of page_address() in real mode:
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), i.e. a purely
 * arithmetic operation that does not access the page struct.
 *
 * Theoretically page_address() could be defined differently,
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only, and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values into the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * It cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 * mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);
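
/*
 * For illustration: with 64K backing pages (TCES_PER_PAGE = 8192) and
 * stt->offset = 0, TCE index 10000 is stored in stt->pages[10000 / 8192],
 * i.e. page 1, at slot 10000 % 8192 = 1808.
 */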

long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
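
/*
 * For illustration, assuming 64K pages (PAGE_SHIFT = 16) and permission
 * bits in the low two bits: a tce of 0x10011003 yields gfn = 0x1001, and
 * the resulting userspace address is the hva of that gfn with the in-page
 * offset 0x1000 preserved and the TCE permission bits stripped.
 */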

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so we still get a valid UA here.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}

static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
}

static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
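
/*
 * For illustration: with a guest TCE table using 64K pages
 * (stt->page_shift = 16) backed by a hardware IOMMU table using 4K pages
 * (it_page_shift = 12), subpages = 1 << (16 - 12) = 16, so guest entry N
 * covers hardware entries N * 16 .. N * 16 + 15.
 */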

static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_rm_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
			return ret;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}
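
/*
 * For illustration (an editor's sketch, not part of this file's API): a
 * guest issues H_PUT_TCE with the LIOBN of the TCE table, the I/O bus
 * address (ioba) selecting the entry, and the TCE value itself, roughly:
 *
 *	plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tce);
 *
 * assuming the usual PAPR convention of the hcall number in r3 and
 * arguments in r4 onward. The entry index is then derived above as
 * ioba >> stt->page_shift.
 */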

static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and the current task there
	 * is not the hypervisor. This is also safe against THP in the
	 * host, because an IPI to the primary thread will wait for the
	 * secondary to exit, which in turn ensures the page table walk
	 * below finishes first.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
		(ua & ~PAGE_MASK);

	return 0;
}

long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole list fits within a single 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and the gpa->hpa translation
		 * does not depend on the HPT.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually the case of a guest with emulated devices
		 * only, when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case,
		 * so lock the rmap and do __find_linux_pte().
		 */
		if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_TOO_HARD;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_rm_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) {
			/* Do not return directly here, the rmap is still locked */
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
						entry + i);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}
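
/*
 * For illustration: the 512-entry limit above means the TCE list occupies at
 * most 512 * sizeof(u64) = 4096 bytes, which together with the 4K alignment
 * check on tce_list guarantees the whole list lives in a single 4K page and
 * can be read through one guest page translation.
 */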

long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
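
/*
 * For illustration: H_STUFF_TCE writes the same value to a run of entries,
 * so e.g. a guest call with tce_value = 0 and npages = 512 clears 512
 * consecutive TCEs starting at ioba. tce_value must have both permission
 * bits clear, which is why only DMA_NONE-style values pass the check above.
 */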

/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);

#endif /* KVM_BOOK3S_HV_POSSIBLE */