/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>
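
/*
 * Kernel command-line switches. Each early_param() below lets the guest
 * administrator turn one PV feature off at boot; e.g. booting with
 * "no-kvmapf" on the command line disables async page fault handling.
 */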
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
	kvmapf = 0;
	return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
	steal_acc = 0;
	return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
	kvmclock_vsyscall = 0;
	return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}
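
/*
 * Async page fault bookkeeping: a task that faults on a page the host has
 * paged out sleeps on a per-token wait queue. The queue nodes live in a
 * small hash table indexed by hash_32(token, KVM_TASK_SLEEP_HASHBITS).
 */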
#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
	struct hlist_node link;
	struct swait_queue_head wq;
	u32 token;
	int cpu;
	bool halted;
};

static struct kvm_task_sleep_head {
	raw_spinlock_t lock;
	struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
						  u32 token)
{
	struct hlist_node *p;

	hlist_for_each(p, &b->list) {
		struct kvm_task_sleep_node *n =
			hlist_entry(p, typeof(*n), link);
		if (n->token == token)
			return n;
	}

	return NULL;
}
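
/*
 * Sleep until the host signals that the page for @token is resident again.
 * If the "page ready" wakeup already arrived, a dummy node is found in the
 * hash and we return immediately. Contexts that must not schedule (the idle
 * task, or an elevated preempt count) halt instead of sleeping.
 */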
void kvm_async_pf_task_wait(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node n, *e;
	DECLARE_SWAITQUEUE(wait);

	rcu_irq_enter();

	raw_spin_lock(&b->lock);
	e = _find_apf_task(b, token);
	if (e) {
		/* dummy entry exists -> wakeup was delivered ahead of the PF */
		hlist_del(&e->link);
		kfree(e);
		raw_spin_unlock(&b->lock);

		rcu_irq_exit();
		return;
	}

	n.token = token;
	n.cpu = smp_processor_id();
	n.halted = is_idle_task(current) || preempt_count() > 1;
	init_swait_queue_head(&n.wq);
	hlist_add_head(&n.link, &b->list);
	raw_spin_unlock(&b->lock);

	for (;;) {
		if (!n.halted)
			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
		if (hlist_unhashed(&n.link))
			break;

		if (!n.halted) {
			local_irq_enable();
			schedule();
			local_irq_disable();
		} else {
			/*
			 * We cannot reschedule. So halt.
			 */
			rcu_irq_exit();
			native_safe_halt();
			rcu_irq_enter();
			local_irq_disable();
		}
	}
	if (!n.halted)
		finish_swait(&n.wq, &wait);

	rcu_irq_exit();
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
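
/*
 * Wake one sleeper: a halted vCPU context is kicked with a reschedule IPI;
 * an ordinary sleeper is woken through its swait queue.
 */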
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
	hlist_del_init(&n->link);
	if (n->halted)
		smp_send_reschedule(n->cpu);
	else if (swait_active(&n->wq))
		swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
	int i;

	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
		struct hlist_node *p, *next;
		struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
		raw_spin_lock(&b->lock);
		hlist_for_each_safe(p, next, &b->list) {
			struct kvm_task_sleep_node *n =
				hlist_entry(p, typeof(*n), link);
			if (n->cpu == smp_processor_id())
				apf_task_wake_one(n);
		}
		raw_spin_unlock(&b->lock);
	}
}

void kvm_async_pf_task_wake(u32 token)
{
	u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
	struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
	struct kvm_task_sleep_node *n;

	if (token == ~0) {
		apf_task_wake_all();
		return;
	}

again:
	raw_spin_lock(&b->lock);
	n = _find_apf_task(b, token);
	if (!n) {
		/*
		 * async PF was not yet handled.
		 * Add dummy entry for the token.
		 */
		n = kzalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			/*
			 * Allocation failed! Busy-wait while another CPU
			 * handles the async PF.
			 */
			raw_spin_unlock(&b->lock);
			cpu_relax();
			goto again;
		}
		n->token = token;
		n->cpu = smp_processor_id();
		init_swait_queue_head(&n->wq);
		hlist_add_head(&n->link, &b->list);
	} else
		apf_task_wake_one(n);
	raw_spin_unlock(&b->lock);
	return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
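
/*
 * Read the async PF reason the host wrote into this CPU's shared area and
 * reset it, so the next fault can be decoded again. Returns 0 when async PF
 * is disabled or the fault is an ordinary #PF.
 */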
u32 kvm_read_and_reset_pf_reason(void)
{
	u32 reason = 0;

	if (__this_cpu_read(apf_reason.enabled)) {
		reason = __this_cpu_read(apf_reason.reason);
		__this_cpu_write(apf_reason.reason, 0);
	}

	return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	enum ctx_state prev_state;

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */
		prev_state = exception_enter();
		kvm_async_pf_task_wait((u32)read_cr2());
		exception_exit(prev_state);
		break;
	case KVM_PV_REASON_PAGE_READY:
		rcu_irq_enter();
		kvm_async_pf_task_wake((u32)read_cr2());
		rcu_irq_exit();
		break;
	}
}
NOKPROBE_SYMBOL(do_async_page_fault);
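
/*
 * Wire up the KVM-specific paravirt hooks. With KVM_FEATURE_NOP_IO_DELAY
 * the traditional port-0x80 I/O delay is unnecessary under KVM, so it is
 * replaced with the empty kvm_io_delay() above.
 */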
static void __init paravirt_ops_setup(void)
{
	pv_info.name = "KVM";

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
	no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
	int cpu = smp_processor_id();
	struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

	if (!has_steal_clock)
		return;

	wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
	pr_info("kvm-stealtime: cpu %d, msr %llx\n",
		cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
	/**
	 * This relies on __test_and_clear_bit to modify the memory
	 * in a way that is atomic with respect to the local CPU.
	 * The hypervisor only accesses this memory from the local CPU so
	 * there's no need for lock or memory barriers.
	 * An optimization barrier is implied in apic write.
	 */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
		return;
	apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}
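
/*
 * Per-CPU PV feature setup: each feature is armed by writing the physical
 * address of a per-CPU shared area, together with an enable bit, into the
 * corresponding KVM MSR.
 */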
static void kvm_guest_cpu_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
		u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
		pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
		wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
		__this_cpu_write(apf_reason.enabled, 1);
		printk(KERN_INFO"KVM setup async PF for cpu %d\n",
		       smp_processor_id());
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
		unsigned long pa;
		/* Size alignment is implied but just to make it explicit. */
		BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
		__this_cpu_write(kvm_apic_eoi, 0);
		pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
			| KVM_MSR_ENABLED;
		wrmsrl(MSR_KVM_PV_EOI_EN, pa);
	}

	if (has_steal_clock)
		kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
	if (!__this_cpu_read(apf_reason.enabled))
		return;

	wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
	__this_cpu_write(apf_reason.enabled, 0);

	printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
	       smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
	/*
	 * We disable PV EOI before we load a new kernel by kexec, since
	 * MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's memory.
	 * The new kernel can re-enable it when it boots.
	 */
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
				unsigned long code, void *unused)
{
	if (code == SYS_RESTART)
		on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
	.notifier_call = kvm_pv_reboot_notify,
};
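
/*
 * Lock-free, seqcount-style read of the per-CPU steal time: the host bumps
 * src->version before and after updating the record, so an odd version, or
 * one that changed mid-read, means we raced with an update and must retry.
 */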
static u64 kvm_steal_clock(int cpu)
{
	u64 steal;
	struct kvm_steal_time *src;
	int version;

	src = &per_cpu(steal_time, cpu);
	do {
		version = src->version;
		rmb();
		steal = src->steal;
		rmb();
	} while ((version & 1) || (version != src->version));

	return steal;
}

void kvm_disable_steal_time(void)
{
	if (!has_steal_clock)
		return;

	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}
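
/*
 * On SMP, the per-CPU PV state is set up as each CPU comes online and torn
 * down (MSRs cleared, pending async PF sleepers woken) before it goes
 * offline.
 */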
#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
	kvm_guest_cpu_init();
	native_smp_prepare_boot_cpu();
	kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
	kvm_disable_steal_time();
	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
	kvm_pv_disable_apf();
	apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_init();
	local_irq_enable();
	return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
	local_irq_disable();
	kvm_guest_cpu_offline();
	local_irq_enable();
	return 0;
}
#endif
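
/* Vector 14 is #PF; route it to the async-PF-aware page fault handler. */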
static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
	int i;

	if (!kvm_para_available())
		return;

	paravirt_ops_setup();
	register_reboot_notifier(&kvm_pv_reboot_nb);
	for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
		raw_spin_lock_init(&async_pf_sleepers[i].lock);
	if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
		x86_init.irqs.trap_init = kvm_apf_trap_init;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		has_steal_clock = 1;
		pv_time_ops.steal_clock = kvm_steal_clock;
	}

	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
		apic_set_eoi_write(kvm_guest_apic_eoi_write);

	if (kvmclock_vsyscall)
		kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
	if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
				      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
		pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
	kvm_guest_cpu_init();
#endif

	/*
	 * Hard lockup detection is enabled by default. Disable it, as guests
	 * can get false positives too easily, for example if the host is
	 * overcommitted.
	 */
	hardlockup_detector_disable();
}
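
/*
 * KVM advertises itself through the hypervisor CPUID range starting at
 * 0x40000000, identified by the "KVMKVMKVM" signature.
 */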
static noinline uint32_t __kvm_cpuid_base(void)
{
	if (boot_cpu_data.cpuid_level < 0)
		return 0;	/* So we don't blow up on old processors */

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

	return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
	static int kvm_cpuid_base = -1;

	if (kvm_cpuid_base == -1)
		kvm_cpuid_base = __kvm_cpuid_base();

	return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
	return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
	return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
	.name			= "KVM",
	.detect			= kvm_detect,
	.x2apic_available	= kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);
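
/*
 * Flip the paravirt steal-time static keys once the jump label
 * infrastructure is up; the scheduler uses them to account steal time.
 */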
static __init int activate_jump_labels(void)
{
	if (has_steal_clock) {
		static_key_slow_inc(&paravirt_steal_enabled);
		if (steal_acc)
			static_key_slow_inc(&paravirt_steal_rq_enabled);
	}

	return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
	int apicid;
	unsigned long flags = 0;

	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
	unsigned long flags;

	if (in_nmi())
		return;

	local_irq_save(flags);

	if (READ_ONCE(*ptr) != val)
		goto out;

	/*
	 * Halt until it's our turn and we get kicked. Note that we do a safe
	 * halt when interrupts were enabled, so that we do not hang if the
	 * lock byte is overwritten in the IRQ spinlock slowpath and no
	 * spurious interrupt occurs to wake us up.
	 */
	if (arch_irqs_disabled_flags(flags))
		halt();
	else
		safe_halt();

out:
	local_irq_restore(flags);
}
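
/*
 * steal_time.preempted is set by the host while this vCPU is scheduled out,
 * letting vcpu_is_preempted() callers avoid spinning on a preempted lock
 * holder.
 */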
__visible bool __kvm_vcpu_is_preempted(int cpu)
{
	struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

	return !!src->preempted;
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
	if (!kvm_para_available())
		return;
	/* Does host kernel support KVM_FEATURE_PV_UNHALT? */
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	__pv_init_lock_hash();
	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
	pv_lock_ops.wait = kvm_wait;
	pv_lock_ops.kick = kvm_kick_cpu;

	if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
		pv_lock_ops.vcpu_is_preempted =
			PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
	}
}

static __init int kvm_spinlock_init_jump(void)
{
	if (!kvm_para_available())
		return 0;
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return 0;

	static_key_slow_inc(&paravirt_ticketlocks_enabled);
	printk(KERN_INFO "KVM setup paravirtual spinlock\n");

	return 0;
}
early_initcall(kvm_spinlock_init_jump);
#endif	/* CONFIG_PARAVIRT_SPINLOCKS */