book3s_hv_builtin.c

/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
                       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
 * should be power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;
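
/*
 * The reservation ratio can be overridden on the kernel command line,
 * e.g. "kvm_cma_resv_ratio=10" reserves 10% of memory instead of the
 * default 5%; the value is parsed as a percentage below.
 */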
static int __init early_parse_kvm_cma_resv(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        if (!p)
                return -EINVAL;
        return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

        return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
                         false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
        cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
        unsigned long align_size;
        struct memblock_region *reg;
        phys_addr_t selected_size = 0;

        /*
         * We need CMA reservation only when we are in HV mode
         */
        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return;
        /*
         * We cannot use memblock_phys_mem_size() here, because
         * memblock_analyze() has not been called yet.
         */
        for_each_memblock(memory, reg)
                selected_size += memblock_region_memory_end_pfn(reg) -
                                 memblock_region_memory_base_pfn(reg);

        selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
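        /*
         * For example, 64 GiB of memory with the default 5% ratio yields
         * a request of roughly 3.2 GiB, subject to the alignment passed
         * to cma_declare_contiguous() below.
         */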
        if (selected_size) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         (unsigned long)selected_size / SZ_1M);
                align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
                cma_declare_contiguous(0, selected_size, 0, align_size,
                        KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
                        &kvm_cma);
        }
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        int threads_running;
        int threads_ceded;
        int threads_conferring;
        u64 stop = get_tb() + 10 * tb_ticks_per_usec;
        int rv = H_SUCCESS; /* => don't yield */

        set_bit(ptid, &vc->conferring_threads);
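        /*
         * Spin for up to 10us (the 'stop' timebase value above), checking
         * whether every thread still in the guest has ceded or is conferring.
         */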
        while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
                threads_running = VCORE_ENTRY_MAP(vc);
                threads_ceded = vc->napping_threads;
                threads_conferring = vc->conferring_threads;
                if ((threads_ceded | threads_conferring) == threads_running) {
                        rv = H_TOO_HARD; /* => do yield */
                        break;
                }
        }
        clear_bit(ptid, &vc->conferring_threads);
        return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
        get_online_cpus();
        atomic_inc(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
        get_online_cpus();
        atomic_dec(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
        return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];
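
/*
 * hcall_real_table (in book3s_hv_rmhandlers.S) has one entry per hcall,
 * indexed by hcall number / 4 since hcall numbers are multiples of 4;
 * a non-zero entry means the hcall has a real-mode handler.
 */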
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
        cmd /= 4;
        if (cmd < hcall_real_table_end - hcall_real_table &&
            hcall_real_table[cmd])
                return 1;

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
        return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
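
/*
 * H_RANDOM handler: on success the random value is placed in guest GPR4
 * (the first hcall output register) and H_SUCCESS is returned; otherwise
 * H_HARDWARE is returned.
 */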
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
        int r;

        /* Only need to do the expensive mfmsr() on radix */
        if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
                r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
        else
                r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
        if (r)
                return H_SUCCESS;

        return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
        void __iomem *xics_phys;
        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

        /* For a nested hypervisor, use the XICS via hcall */
        if (kvmhv_on_pseries()) {
                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
                                IPI_PRIORITY);
                return;
        }

        /* On POWER9 we can use msgsnd for any destination cpu. */
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                msg |= get_hard_smp_processor_id(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* On POWER8 for IPIs to threads in the same core, use msgsnd. */
        if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
            cpu_first_thread_sibling(cpu) ==
            cpu_first_thread_sibling(raw_smp_processor_id())) {
                msg |= cpu_thread_in_core(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* We should never reach this */
        if (WARN_ON_ONCE(xive_enabled()))
                return;

        /* Else poke the target with an IPI */
        xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
        if (xics_phys)
                __raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
        else
                opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
        int cpu = vc->pcpu;

        /* Order setting of exit map vs. msgsnd/IPI */
        smp_mb();
        for (; active; active >>= 1, ++cpu)
                if (active & 1)
                        kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
        int me, ee, i, t;
        int cpu0;

        /* Set our bit in the threads-exiting-guest map in the 0xff00
           bits of vcore->entry_exit_map */
        me = 0x100 << ptid;
        do {
                ee = vc->entry_exit_map;
        } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

        /* Are we the first here? */
        if ((ee >> 8) != 0)
                return;

        /*
         * Trigger the other threads in this vcore to exit the guest.
         * If this is a hypervisor decrementer interrupt then they
         * will be already on their way out of the guest.
         */
        if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
                kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

        /*
         * If we are doing dynamic micro-threading, interrupt the other
         * subcores to pull them out of their guests too.
         */
        if (!sip)
                return;

        for (i = 0; i < MAX_SUBCORES; ++i) {
                vc = sip->vc[i];
                if (!vc)
                        break;
                do {
                        ee = vc->entry_exit_map;
                        /* Already asked to exit? */
                        if ((ee >> 8) != 0)
                                break;
                } while (cmpxchg(&vc->entry_exit_map, ee,
                                 ee | VCORE_EXIT_REQ) != ee);
                if ((ee >> 8) == 0)
                        kvmhv_interrupt_vcore(vc, ee);
        }

        /*
         * On POWER9 when running a HPT guest on a radix host (sip != NULL),
         * we have to interrupt inactive CPU threads to get them to
         * restore the host LPCR value.
         */
        if (sip->lpcr_req) {
                if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
                        vc = local_paca->kvm_hstate.kvm_vcore;
                        cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
                        for (t = 1; t < threads_per_core; ++t) {
                                if (sip->napped[t])
                                        kvmhv_rm_send_ipi(cpu0 + t);
                        }
                }
        }
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
                                         u32 xisr)
{
        int i;

        /*
         * We access the mapped array here without a lock. That
         * is safe because we never reduce the number of entries
         * in the array and we never change the v_hwirq field of
         * an entry once it is set.
         *
         * We have also carefully ordered the stores in the writer
         * and the loads here in the reader, so that if we find a matching
         * hwirq here, the associated GSI and irq_desc fields are valid.
         */
        for (i = 0; i < pimap->n_mapped; i++) {
                if (xisr == pimap->mapped[i].r_hwirq) {
                        /*
                         * Order subsequent reads in the caller to serialize
                         * with the writer.
                         */
                        smp_rmb();
                        return &pimap->mapped[i];
                }
        }
        return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
        struct kvmppc_passthru_irqmap *pimap;
        struct kvmppc_irq_map *irq_map;
        struct kvm_vcpu *vcpu;

        vcpu = local_paca->kvm_hstate.kvm_vcpu;
        if (!vcpu)
                return 1;
        pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
        if (!pimap)
                return 1;
        irq_map = get_irqmap(pimap, xisr);
        if (!irq_map)
                return 1;

        /* We're handling this interrupt, generic code doesn't need to */
        local_paca->kvm_hstate.saved_xirr = 0;

        return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
        return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
        long ret = 0;
        long rc;
        bool again;

        if (xive_enabled())
                return 1;

        do {
                again = false;
                rc = kvmppc_read_one_intr(&again);
                if (rc && (ret == 0 || rc > ret))
                        ret = rc;
        } while (again);
        return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
        void __iomem *xics_phys;
        u32 h_xirr;
        __be32 xirr;
        u32 xisr;
        u8 host_ipi;
        int64_t rc;

        if (xive_enabled())
                return 1;

        /* see if a host IPI is pending */
        host_ipi = local_paca->kvm_hstate.host_ipi;
        if (host_ipi)
                return 1;

        /* Now read the interrupt from the ICP */
        if (kvmhv_on_pseries()) {
                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
                xirr = cpu_to_be32(retbuf[0]);
        } else {
                xics_phys = local_paca->kvm_hstate.xics_phys;
                rc = 0;
                if (!xics_phys)
                        rc = opal_int_get_xirr(&xirr, false);
                else
                        xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
        }
        if (rc < 0)
                return 1;

        /*
         * Save XIRR for later. Since we get control in reverse endian
         * on LE systems, save it byte reversed and fetch it back in
         * host endian. Note that xirr is the value read from the
         * XIRR register, while h_xirr is the host endian version.
         */
        h_xirr = be32_to_cpu(xirr);
        local_paca->kvm_hstate.saved_xirr = h_xirr;
        xisr = h_xirr & 0xffffff;
        /*
         * Ensure that the store/load complete to guarantee all side
         * effects of loading from XIRR has completed
         */
        smp_mb();

        /* if nothing pending in the ICP */
        if (!xisr)
                return 0;

        /* We found something in the ICP...
         *
         * If it is an IPI, clear the MFRR and EOI it.
         */
        if (xisr == XICS_IPI) {
                rc = 0;
                if (kvmhv_on_pseries()) {
                        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                        plpar_hcall_raw(H_IPI, retbuf,
                                        hard_smp_processor_id(), 0xff);
                        plpar_hcall_raw(H_EOI, retbuf, h_xirr);
                } else if (xics_phys) {
                        __raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
                        __raw_rm_writel(xirr, xics_phys + XICS_XIRR);
                } else {
                        opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
                        rc = opal_int_eoi(h_xirr);
                }
                /* If rc > 0, there is another interrupt pending */
                *again = rc > 0;

                /*
                 * Need to ensure side effects of above stores
                 * complete before proceeding.
                 */
                smp_mb();

                /*
                 * We need to re-check host IPI now in case it got set in the
                 * meantime. If it's clear, we bounce the interrupt to the
                 * guest
                 */
                host_ipi = local_paca->kvm_hstate.host_ipi;
                if (unlikely(host_ipi != 0)) {
                        /* We raced with the host,
                         * we need to resend that IPI, bummer
                         */
                        if (kvmhv_on_pseries()) {
                                unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

                                plpar_hcall_raw(H_IPI, retbuf,
                                                hard_smp_processor_id(),
                                                IPI_PRIORITY);
                        } else if (xics_phys)
                                __raw_rm_writeb(IPI_PRIORITY,
                                                xics_phys + XICS_MFRR);
                        else
                                opal_int_set_mfrr(hard_smp_processor_id(),
                                                  IPI_PRIORITY);
                        /* Let side effects complete */
                        smp_mb();
                        return 1;
                }

                /* OK, it's an IPI for us */
                local_paca->kvm_hstate.saved_xirr = 0;
                return -1;
        }

        return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
        return !(mfmsr() & MSR_DR);
}

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xive_enabled()) {
                if (is_rm())
                        return xive_rm_h_xirr(vcpu);
                if (unlikely(!__xive_vm_h_xirr))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_xirr(vcpu);
        } else
                return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        vcpu->arch.regs.gpr[5] = get_tb();
        if (xive_enabled()) {
                if (is_rm())
                        return xive_rm_h_xirr(vcpu);
                if (unlikely(!__xive_vm_h_xirr))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_xirr(vcpu);
        } else
                return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xive_enabled()) {
                if (is_rm())
                        return xive_rm_h_ipoll(vcpu, server);
                if (unlikely(!__xive_vm_h_ipoll))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_ipoll(vcpu, server);
        } else
                return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xive_enabled()) {
                if (is_rm())
                        return xive_rm_h_ipi(vcpu, server, mfrr);
                if (unlikely(!__xive_vm_h_ipi))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_ipi(vcpu, server, mfrr);
        } else
                return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xive_enabled()) {
                if (is_rm())
                        return xive_rm_h_cppr(vcpu, cppr);
                if (unlikely(!__xive_vm_h_cppr))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_cppr(vcpu, cppr);
        } else
                return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        if (!kvmppc_xics_enabled(vcpu))
                return H_TOO_HARD;
        if (xive_enabled()) {
                if (is_rm())
                        return xive_rm_h_eoi(vcpu, xirr);
                if (unlikely(!__xive_vm_h_eoi))
                        return H_NOT_AVAILABLE;
                return __xive_vm_h_eoi(vcpu, xirr);
        } else
                return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */

void kvmppc_bad_interrupt(struct pt_regs *regs)
{
        /*
         * 100 could happen at any time, 200 can happen due to invalid real
         * address access for example (or any time due to a hardware problem).
         */
        if (TRAP(regs) == 0x100) {
                get_paca()->in_nmi++;
                system_reset_exception(regs);
                get_paca()->in_nmi--;
        } else if (TRAP(regs) == 0x200) {
                machine_check_exception(regs);
        } else {
                die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
        }
        panic("Bad KVM trap");
}

/*
 * Functions used to switch LPCR HR and UPRT bits on all threads
 * when entering and exiting HPT guests on a radix host.
 */

#define PHASE_REALMODE		1	/* in real mode */
#define PHASE_SET_LPCR		2	/* have set LPCR */
#define PHASE_OUT_OF_GUEST	4	/* have finished executing in guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */

#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
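
/*
 * Each thread records its progress in its own byte of lpcr_sync.phase[];
 * ALL(p) replicates a phase bit into all four byte lanes so it can be
 * compared against the combined 32-bit lpcr_sync.allphases view.
 */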
static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
        int thr = local_paca->kvm_hstate.tid;

        sip->lpcr_sync.phase[thr] |= phase;
        phase = ALL(phase);
        while ((sip->lpcr_sync.allphases & phase) != phase) {
                HMT_low();
                barrier();
        }
        HMT_medium();
}

void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
        unsigned long rb, set;

        /* wait for every other thread to get to real mode */
        wait_for_sync(sip, PHASE_REALMODE);

        /* Set LPCR and LPIDR */
        mtspr(SPRN_LPCR, sip->lpcr_req);
        mtspr(SPRN_LPID, sip->lpidr_req);
        isync();

        /* Invalidate the TLB on thread 0 */
        if (local_paca->kvm_hstate.tid == 0) {
                sip->do_set = 0;
                asm volatile("ptesync" : : : "memory");
                for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
                        rb = TLBIEL_INVAL_SET_LPID +
                                (set << TLBIEL_INVAL_SET_SHIFT);
                        asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
                                     "r" (rb), "r" (0));
                }
                asm volatile("ptesync" : : : "memory");
        }

        /* indicate that we have done so and wait for others */
        wait_for_sync(sip, PHASE_SET_LPCR);
        /* order read of sip->lpcr_sync.allphases vs. sip->do_set */
        smp_rmb();
}

/*
 * Called when a thread that has been in the guest needs
 * to reload the host LPCR value - but only on POWER9 when
 * running a HPT guest on a radix host.
 */
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
        /* we're out of the guest... */
        wait_for_sync(sip, PHASE_OUT_OF_GUEST);
        mtspr(SPRN_LPID, 0);
        mtspr(SPRN_LPCR, sip->host_lpcr);
        isync();

        if (local_paca->kvm_hstate.tid == 0) {
                sip->do_restore = 0;
                smp_wmb();	/* order store of do_restore vs. phase */
        }

        wait_for_sync(sip, PHASE_RESET_LPCR);
        smp_mb();
        local_paca->kvm_hstate.kvm_split_mode = NULL;
}

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
        int ext;
        unsigned long vec = 0;
        unsigned long lpcr;

        /* Insert EXTERNAL bit into LPCR at the MER bit position */
        ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
        lpcr = mfspr(SPRN_LPCR);
        lpcr |= ext << LPCR_MER_SH;
        mtspr(SPRN_LPCR, lpcr);
        isync();

        if (vcpu->arch.shregs.msr & MSR_EE) {
                if (ext) {
                        vec = BOOK3S_INTERRUPT_EXTERNAL;
                } else {
                        long int dec = mfspr(SPRN_DEC);
                        if (!(lpcr & LPCR_LD))
                                dec = (int) dec;
                        if (dec < 0)
                                vec = BOOK3S_INTERRUPT_DECREMENTER;
                }
        }
        if (vec) {
                unsigned long msr, old_msr = vcpu->arch.shregs.msr;

                kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
                kvmppc_set_srr1(vcpu, old_msr);
                kvmppc_set_pc(vcpu, vec);
                msr = vcpu->arch.intr_msr;
                if (MSR_TM_ACTIVE(old_msr))
                        msr |= MSR_TS_S;
                vcpu->arch.shregs.msr = msr;
        }
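
        /*
         * Deliver a pending guest doorbell by writing DPDES, and record it
         * in the vcore so the saved doorbell state stays consistent, before
         * clearing the request.
         */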
        if (vcpu->arch.doorbell_request) {
                mtspr(SPRN_DPDES, 1);
                vcpu->arch.vcore->dpdes = 1;
                smp_wmb();
                vcpu->arch.doorbell_request = 0;
        }
}