arch_timer.c

/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/cpu.h>
#include <linux/of_irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>

#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>

#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>

static struct timecounter *timecounter;
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;
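
/* Read the host physical counter through the arch timer timecounter. */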
static cycle_t kvm_phys_timer_read(void)
{
	return timecounter->cc->read(timecounter->cc);
}

static bool timer_is_armed(struct arch_timer_cpu *timer)
{
	return timer->armed;
}

/* timer_arm: as in "arm the timer", not as in ARM the company */
static void timer_arm(struct arch_timer_cpu *timer, u64 ns)
{
	timer->armed = true;
	hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns),
		      HRTIMER_MODE_ABS);
}
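
/*
 * Cancel the background hrtimer and any pending expiry work before
 * clearing the armed flag.
 */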
static void timer_disarm(struct arch_timer_cpu *timer)
{
	if (timer_is_armed(timer)) {
		hrtimer_cancel(&timer->timer);
		cancel_work_sync(&timer->expired);
		timer->armed = false;
	}
}
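
/*
 * Mark the mapped physical interrupt as active and inject the virtual
 * timer interrupt into the vgic for this vcpu.
 */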
static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
{
	int ret;
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	kvm_vgic_set_phys_irq_active(timer->map, true);
	ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
					 timer->map,
					 timer->irq->level);
	WARN_ON(ret);
}

static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
{
	struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;

	/*
	 * We disable the timer in the world switch and let it be
	 * handled by kvm_timer_sync_hwstate(). Getting a timer
	 * interrupt at this point is a sure sign of some major
	 * breakage.
	 */
	pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu);
	return IRQ_HANDLED;
}

/*
 * Work function for handling the backup timer that we schedule when a vcpu is
 * no longer running, but had a timer programmed to fire in the future.
 */
static void kvm_timer_inject_irq_work(struct work_struct *work)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
	vcpu->arch.timer_cpu.armed = false;

	/*
	 * If the vcpu is blocked we want to wake it up so that it will see
	 * the timer has expired when entering the guest.
	 */
	kvm_vcpu_kick(vcpu);
}
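
/*
 * Expiry callback for the background hrtimer: hand off to the workqueue,
 * whose work function kicks the (possibly blocked) vcpu.
 */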
static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;

	timer = container_of(hrt, struct arch_timer_cpu, timer);
	queue_work(wqueue, &timer->expired);
	return HRTIMER_NORESTART;
}
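
/*
 * Return true if the guest timer output should be asserted: the timer is
 * enabled, not masked, not already active at the distributor, and its
 * compare value has been reached by the guest's virtual counter.
 */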
bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	cycle_t cval, now;

	if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
	    !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) ||
	    kvm_vgic_get_phys_irq_active(timer->map))
		return false;

	cval = timer->cntv_cval;
	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

	return cval <= now;
}

/**
 * kvm_timer_flush_hwstate - prepare to move the virt timer to the cpu
 * @vcpu: The vcpu pointer
 *
 * Disarm any pending soft timers, since the world-switch code will write the
 * virtual timer state back to the physical CPU.
 */
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	bool phys_active;
	int ret;

	/*
	 * We're about to run this vcpu again, so there is no need to
	 * keep the background timer running, as we're about to
	 * populate the CPU timer again.
	 */
	timer_disarm(timer);

	/*
	 * If the timer expired while we were not scheduled, now is the time
	 * to inject it.
	 */
	if (kvm_timer_should_fire(vcpu))
		kvm_timer_inject_irq(vcpu);

	/*
	 * We keep track of whether the edge-triggered interrupt has been
	 * signalled to the vgic/guest, and if so, we mask the interrupt and
	 * the physical distributor to prevent the timer from raising a
	 * physical interrupt whenever we run a guest, preventing forward
	 * VCPU progress.
	 */
	if (kvm_vgic_get_phys_irq_active(timer->map))
		phys_active = true;
	else
		phys_active = false;

	ret = irq_set_irqchip_state(timer->map->irq,
				    IRQCHIP_STATE_ACTIVE,
				    phys_active);
	WARN_ON(ret);
}

/**
 * kvm_timer_sync_hwstate - sync timer state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the virtual timer was armed and either schedule a corresponding
 * soft timer or inject directly if already expired.
 */
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	cycle_t cval, now;
	u64 ns;

	BUG_ON(timer_is_armed(timer));

	if (kvm_timer_should_fire(vcpu)) {
		/*
		 * Timer has already expired while we were not
		 * looking. Inject the interrupt and carry on.
		 */
		kvm_timer_inject_irq(vcpu);
		return;
	}

	cval = timer->cntv_cval;
	now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;

	ns = cyclecounter_cyc2ns(timecounter->cc, cval - now, timecounter->mask,
				 &timecounter->frac);
	timer_arm(timer, ns);
}

int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
			 const struct kvm_irq_level *irq)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	struct irq_phys_map *map;

	/*
	 * The vcpu timer irq number cannot be determined in
	 * kvm_timer_vcpu_init() because it is called much before
	 * kvm_vcpu_set_target(). To handle this, we determine
	 * vcpu timer irq number when the vcpu is reset.
	 */
	timer->irq = irq;

	/*
	 * The bits in CNTV_CTL are architecturally reset to UNKNOWN for ARMv8
	 * and to 0 for ARMv7. We provide an implementation that always
	 * resets the timer to be disabled and unmasked and is compliant with
	 * the ARMv7 architecture.
	 */
	timer->cntv_ctl = 0;

	/*
	 * Tell the VGIC that the virtual interrupt is tied to a
	 * physical interrupt. We do that once per VCPU.
	 */
	map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
	if (WARN_ON(IS_ERR(map)))
		return PTR_ERR(map);

	timer->map = map;
	return 0;
}
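
/* Set up the per-vcpu background hrtimer and its expiry work item. */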
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
	hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	timer->timer.function = kvm_timer_expire;
}
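
/* Enable the host virtual timer PPI on the current CPU. */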
static void kvm_timer_init_interrupt(void *info)
{
	enable_percpu_irq(host_vtimer_irq, 0);
}
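
/*
 * Set a guest timer register (KVM_REG_ARM_TIMER_*). Writing CNT adjusts
 * the per-VM cntvoff rather than the counter itself.
 */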
int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		timer->cntv_ctl = value;
		break;
	case KVM_REG_ARM_TIMER_CNT:
		vcpu->kvm->arch.timer.cntvoff = kvm_phys_timer_read() - value;
		break;
	case KVM_REG_ARM_TIMER_CVAL:
		timer->cntv_cval = value;
		break;
	default:
		return -1;
	}

	return 0;
}
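
/*
 * Read back a guest timer register; CNT is derived from the physical
 * counter and the per-VM cntvoff.
 */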
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	switch (regid) {
	case KVM_REG_ARM_TIMER_CTL:
		return timer->cntv_ctl;
	case KVM_REG_ARM_TIMER_CNT:
		return kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
	case KVM_REG_ARM_TIMER_CVAL:
		return timer->cntv_cval;
	}
	return (u64)-1;
}
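
/*
 * CPU hotplug notifier: enable the per-cpu timer interrupt on CPUs coming
 * online and disable it on CPUs going down.
 */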
static int kvm_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *cpu)
{
	switch (action) {
	case CPU_STARTING:
	case CPU_STARTING_FROZEN:
		kvm_timer_init_interrupt(NULL);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		disable_percpu_irq(host_vtimer_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block kvm_timer_cpu_nb = {
	.notifier_call = kvm_timer_cpu_notify,
};

static const struct of_device_id arch_timer_of_match[] = {
	{ .compatible	= "arm,armv7-timer",	},
	{ .compatible	= "arm,armv8-timer",	},
	{},
};
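
/*
 * Global init: locate the arch timer DT node, request the virtual timer PPI
 * as a per-cpu interrupt, register the CPU hotplug notifier, create the
 * expiry workqueue and enable the interrupt on all online CPUs.
 */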
int kvm_timer_hyp_init(void)
{
	struct device_node *np;
	unsigned int ppi;
	int err;

	timecounter = arch_timer_get_timecounter();
	if (!timecounter)
		return -ENODEV;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		kvm_err("kvm_arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	ppi = irq_of_parse_and_map(np, 2);
	if (!ppi) {
		kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
		err = -EINVAL;
		goto out;
	}

	err = request_percpu_irq(ppi, kvm_arch_timer_handler,
				 "kvm guest timer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
			ppi, err);
		goto out;
	}

	host_vtimer_irq = ppi;

	err = __register_cpu_notifier(&kvm_timer_cpu_nb);
	if (err) {
		kvm_err("Cannot register timer CPU notifier\n");
		goto out_free;
	}

	wqueue = create_singlethread_workqueue("kvm_arch_timer");
	if (!wqueue) {
		err = -ENOMEM;
		goto out_free;
	}

	kvm_info("%s IRQ%d\n", np->name, ppi);
	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

	goto out;
out_free:
	free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
	of_node_put(np);
	return err;
}
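
/*
 * Per-vcpu teardown: disarm the background timer and release the
 * virtual/physical interrupt mapping.
 */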
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
{
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

	timer_disarm(timer);
	if (timer->map)
		kvm_vgic_unmap_phys_irq(vcpu, timer->map);
}

void kvm_timer_enable(struct kvm *kvm)
{
	if (kvm->arch.timer.enabled)
		return;

	/*
	 * There is a potential race here between VCPUs starting for the first
	 * time, which may be enabling the timer multiple times. That doesn't
	 * hurt though, because we're just setting a variable to the same
	 * value it already had. The important thing is that all VCPUs have
	 * the enabled variable set, before entering the guest, if the arch
	 * timers are enabled.
	 */
	if (timecounter && wqueue)
		kvm->arch.timer.enabled = 1;
}
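
/*
 * VM init: record the current physical counter value as CNTVOFF so the
 * guest's virtual counter starts from zero.
 */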
void kvm_timer_init(struct kvm *kvm)
{
	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}