kvm.c

/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/idle.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
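
/*
 * Per-CPU buffers shared with the host.  Each CPU hands the physical
 * address of its buffer to the hypervisor through MSR_KVM_ASYNC_PF_EN
 * and MSR_KVM_STEAL_TIME; both MSRs expect a 64-byte aligned address,
 * with the enable bit(s) encoded in the low bits.
 */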
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}
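
/*
 * Called from the page fault handler when the host reported
 * KVM_PV_REASON_PAGE_NOT_PRESENT for @token.  Sleep until the matching
 * "page ready" wakeup arrives; if this context cannot schedule (idle
 * task or an elevated preempt count), halt instead and wait for the
 * host to kick the vCPU.
 */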
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DECLARE_SWAITQUEUE(wait);

        rcu_irq_enter();

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                raw_spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) || preempt_count() > 1;
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        rcu_irq_exit();
                        native_safe_halt();
                        rcu_irq_enter();
                        local_irq_disable();
                }
        }
        if (!n.halted)
                finish_swait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (swait_active(&n->wq))
                swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}
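
/*
 * "Page ready" notification from the host.  A token of ~0 means all
 * outstanding async page faults on this CPU are done, so wake every
 * sleeper; otherwise wake the task waiting on @token, or leave a dummy
 * node behind if this wakeup raced ahead of the corresponding #PF.
 */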
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while other cpu
                         * handles async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        raw_spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);
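
/*
 * Replacement #PF handler.  A zero reason in the shared apf_reason
 * buffer means an ordinary page fault, which is handed to the normal
 * page fault path; PAGE_NOT_PRESENT puts the current task to sleep
 * until the host has paged the memory back in, and PAGE_READY wakes the
 * task that was waiting on the token stored in CR2.
 */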
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                trace_do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                exit_idle();
                kvm_async_pf_task_wait((u32)read_cr2());
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                exit_idle();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /**
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic_write(APIC_EOI, APIC_EOI_ACK);
}
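
/*
 * Per-CPU guest setup: register the async PF shared buffer, the PV EOI
 * flag word and the steal time area with the hypervisor through their
 * respective MSRs.  Runs on each CPU as it is brought up.
 */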
static void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO"KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec,
         * since MSR_KVM_PV_EOI_EN stores a pointer into old kernel's memory.
         * New kernel can re-enable when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};
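
/*
 * Read the per-CPU steal time record published by the host.  The host
 * bumps ->version before and after each update, so an odd value or a
 * version change across the read means we raced with an update and
 * must retry (a seqcount-style protocol).
 */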
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                rmb();
                steal = src->steal;
                rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

static void kvm_guest_cpu_online(void *dummy)
{
        kvm_guest_cpu_init();
}

static void kvm_guest_cpu_offline(void *dummy)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
                          void *hcpu)
{
        int cpu = (unsigned long)hcpu;
        switch (action) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_online, NULL, 0);
                break;
        case CPU_DOWN_PREPARE:
        case CPU_DOWN_PREPARE_FROZEN:
                smp_call_function_single(cpu, kvm_guest_cpu_offline, NULL, 1);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_cpu_notifier = {
        .notifier_call = kvm_cpu_notify,
};
#endif
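
/*
 * Route #PF (vector 14) through the async_page_fault entry stub so that
 * do_async_page_fault() sees both ordinary faults and async PF
 * notifications from the host.
 */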
static void __init kvm_apf_trap_init(void)
{
        set_intr_gate(14, async_page_fault);
}

void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        register_cpu_notifier(&kvm_cpu_notifier);
#else
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}
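
/*
 * KVM advertises itself by setting the CPUID "hypervisor present" bit
 * and placing the "KVMKVMKVM\0\0\0" signature in the hypervisor CPUID
 * range (base 0x40000000).  Everything below keys off that signature.
 */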
static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .x2apic_available       = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);

#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#ifdef CONFIG_QUEUED_SPINLOCKS

#include <asm/qspinlock.h>
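
/*
 * pv_lock_ops.wait callback for queued spinlocks: halt this vCPU until
 * the watched lock byte no longer holds @val or another CPU kicks us
 * with KVM_HC_KICK_CPU.  Bail out when called from NMI context.
 */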
static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * halt until it's our turn and we are kicked. Note that we do a safe
         * halt for the irq enabled case to avoid hanging when the lock info
         * is overwritten in the irq spinlock slowpath and no spurious
         * interrupt occurs to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}

#else /* !CONFIG_QUEUED_SPINLOCKS */

enum kvm_contention_stat {
        TAKEN_SLOW,
        TAKEN_SLOW_PICKUP,
        RELEASED_SLOW,
        RELEASED_SLOW_KICKED,
        NR_CONTENTION_STATS
};

#ifdef CONFIG_KVM_DEBUG_FS
#define HISTO_BUCKETS   30

static struct kvm_spinlock_stats
{
        u32 contention_stats[NR_CONTENTION_STATS];
        u32 histo_spin_blocked[HISTO_BUCKETS+1];
        u64 time_blocked;
} spinlock_stats;

static u8 zero_stats;
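
/*
 * Writing a non-zero value to the "zero_stats" debugfs file requests a
 * reset: the next stat update observes it, clears the flag with cmpxchg
 * so only one CPU does the work, and zeroes spinlock_stats.
 */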
static inline void check_zero(void)
{
        u8 ret;
        u8 old;

        old = READ_ONCE(zero_stats);
        if (unlikely(old)) {
                ret = cmpxchg(&zero_stats, old, 0);
                /* This ensures only one fellow resets the stat */
                if (ret == old)
                        memset(&spinlock_stats, 0, sizeof(spinlock_stats));
        }
}

static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
        check_zero();
        spinlock_stats.contention_stats[var] += val;
}

static inline u64 spin_time_start(void)
{
        return sched_clock();
}

static void __spin_time_accum(u64 delta, u32 *array)
{
        unsigned index;

        index = ilog2(delta);
        check_zero();

        if (index < HISTO_BUCKETS)
                array[index]++;
        else
                array[HISTO_BUCKETS]++;
}

static inline void spin_time_accum_blocked(u64 start)
{
        u32 delta;

        delta = sched_clock() - start;
        __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
        spinlock_stats.time_blocked += delta;
}

static struct dentry *d_spin_debug;
static struct dentry *d_kvm_debug;

static struct dentry *kvm_init_debugfs(void)
{
        d_kvm_debug = debugfs_create_dir("kvm-guest", NULL);
        if (!d_kvm_debug)
                printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");

        return d_kvm_debug;
}

static int __init kvm_spinlock_debugfs(void)
{
        struct dentry *d_kvm;

        d_kvm = kvm_init_debugfs();
        if (d_kvm == NULL)
                return -ENOMEM;

        d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);

        debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);

        debugfs_create_u32("taken_slow", 0444, d_spin_debug,
                           &spinlock_stats.contention_stats[TAKEN_SLOW]);
        debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
                           &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);

        debugfs_create_u32("released_slow", 0444, d_spin_debug,
                           &spinlock_stats.contention_stats[RELEASED_SLOW]);
        debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
                           &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);

        debugfs_create_u64("time_blocked", 0444, d_spin_debug,
                           &spinlock_stats.time_blocked);

        debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
                                 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);

        return 0;
}
fs_initcall(kvm_spinlock_debugfs);
#else  /* !CONFIG_KVM_DEBUG_FS */
static inline void add_stats(enum kvm_contention_stat var, u32 val)
{
}

static inline u64 spin_time_start(void)
{
        return 0;
}

static inline void spin_time_accum_blocked(u64 start)
{
}
#endif  /* CONFIG_KVM_DEBUG_FS */

struct kvm_lock_waiting {
        struct arch_spinlock *lock;
        __ticket_t want;
};

/* cpus 'waiting' on a spinlock to become available */
static cpumask_t waiting_cpus;

/* Track spinlock on which a cpu is waiting */
static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
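
/*
 * pv_lock_ops.lock_spinning callback for ticket spinlocks: publish which
 * ticket this CPU is waiting for, mark the lock as contended and halt
 * until the unlocker kicks us (or until the ticket turns out to be ours
 * already).
 */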
__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
        struct kvm_lock_waiting *w;
        int cpu;
        u64 start;
        unsigned long flags;
        __ticket_t head;

        if (in_nmi())
                return;

        w = this_cpu_ptr(&klock_waiting);
        cpu = smp_processor_id();
        start = spin_time_start();

        /*
         * Make sure an interrupt handler can't upset things in a
         * partially setup state.
         */
        local_irq_save(flags);

        /*
         * The ordering protocol on this is that the "lock" pointer
         * may only be set non-NULL if the "want" ticket is correct.
         * If we're updating "want", we must first clear "lock".
         */
        w->lock = NULL;
        smp_wmb();
        w->want = want;
        smp_wmb();
        w->lock = lock;

        add_stats(TAKEN_SLOW, 1);

        /*
         * This uses set_bit, which is atomic but we should not rely on its
         * reordering guarantees. So barrier is needed after this call.
         */
        cpumask_set_cpu(cpu, &waiting_cpus);

        barrier();

        /*
         * Mark entry to slowpath before doing the pickup test to make
         * sure we don't deadlock with an unlocker.
         */
        __ticket_enter_slowpath(lock);

        /* make sure enter_slowpath, which is atomic, does not cross the read */
        smp_mb__after_atomic();

        /*
         * Check again to make sure it didn't become free while
         * we weren't looking.
         */
        head = READ_ONCE(lock->tickets.head);
        if (__tickets_equal(head, want)) {
                add_stats(TAKEN_SLOW_PICKUP, 1);
                goto out;
        }

        /*
         * halt until it's our turn and we are kicked. Note that we do a safe
         * halt for the irq enabled case to avoid hanging when the lock info
         * is overwritten in the irq spinlock slowpath and no spurious
         * interrupt occurs to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        cpumask_clear_cpu(cpu, &waiting_cpus);
        w->lock = NULL;
        local_irq_restore(flags);
        spin_time_accum_blocked(start);
}
PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);

/* Kick vcpu waiting on @lock->head to reach value @ticket */
static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
{
        int cpu;

        add_stats(RELEASED_SLOW, 1);
        for_each_cpu(cpu, &waiting_cpus) {
                const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
                if (READ_ONCE(w->lock) == lock &&
                    READ_ONCE(w->want) == ticket) {
                        add_stats(RELEASED_SLOW_KICKED, 1);
                        kvm_kick_cpu(cpu);
                        break;
                }
        }
}

#endif /* !CONFIG_QUEUED_SPINLOCKS */

/*
 * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

#ifdef CONFIG_QUEUED_SPINLOCKS
        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;
#else /* !CONFIG_QUEUED_SPINLOCKS */
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
        pv_lock_ops.unlock_kick = kvm_unlock_kick;
#endif
}

static __init int kvm_spinlock_init_jump(void)
{
        if (!kvm_para_available())
                return 0;
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return 0;

        static_key_slow_inc(&paravirt_ticketlocks_enabled);
        printk(KERN_INFO "KVM setup paravirtual spinlock\n");

        return 0;
}
early_initcall(kvm_spinlock_init_jump);

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */