ioapic.c

/*
 *  Copyright (C) 2001  MandrakeSoft S.A.
 *  Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *    MandrakeSoft S.A.
 *    43, rue d'Aboukir
 *    75002 Paris - France
 *    http://www.linux-mandrake.com/
 *    http://www.mandrakesoft.com/
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *  Yunhong Jiang <yunhong.jiang@intel.com>
 *  Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *  Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

#if 0
#define ioapic_debug(fmt, arg...) printk(KERN_WARNING fmt, ##arg)
#else
#define ioapic_debug(fmt, arg...)
#endif

static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
                bool line_status);

static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
                                          unsigned long addr,
                                          unsigned long length)
{
        unsigned long result = 0;

        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
                result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
                          | (IOAPIC_VERSION_ID & 0xff));
                break;

        case IOAPIC_REG_APIC_ID:
        case IOAPIC_REG_ARB_ID:
                result = ((ioapic->id & 0xf) << 24);
                break;

        default:
        {
                u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
                u64 redir_content;

                if (redir_index < IOAPIC_NUM_PINS)
                        redir_content =
                                ioapic->redirtbl[redir_index].bits;
                else
                        redir_content = ~0ULL;

                result = (ioapic->ioregsel & 0x1) ?
                        (redir_content >> 32) & 0xffffffff :
                        redir_content & 0xffffffff;
                break;
        }
        }

        return result;
}
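
/*
 * RTC EOI tracking: the helpers below record, per vCPU, whether the RTC
 * interrupt (RTC_GSI) has been acknowledged yet, so that a new RTC
 * interrupt raised before the previous one was EOI'd can be reported to
 * userspace as coalesced instead of being silently dropped.
 */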
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
        ioapic->rtc_status.pending_eoi = 0;
        bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
        if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
                kvm_rtc_eoi_tracking_restore_all(ioapic);
}

static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
        bool new_val, old_val;
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
        union kvm_ioapic_redirect_entry *e;

        e = &ioapic->redirtbl[RTC_GSI];
        if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
                                 e->fields.dest_mode))
                return;

        new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
        old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);

        if (new_val == old_val)
                return;

        if (new_val) {
                __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
                ioapic->rtc_status.pending_eoi++;
        } else {
                __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
                ioapic->rtc_status.pending_eoi--;
                rtc_status_pending_eoi_check_valid(ioapic);
        }
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        __rtc_irq_eoi_tracking_restore_one(vcpu);
        spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
        struct kvm_vcpu *vcpu;
        int i;

        if (RTC_GSI >= IOAPIC_NUM_PINS)
                return;

        rtc_irq_eoi_tracking_reset(ioapic);
        kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
                __rtc_irq_eoi_tracking_restore_one(vcpu);
}

static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
{
        if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
                --ioapic->rtc_status.pending_eoi;
                rtc_status_pending_eoi_check_valid(ioapic);
        }
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
        if (ioapic->rtc_status.pending_eoi > 0)
                return true; /* coalesced */

        return false;
}
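
/*
 * Update the IRR for one pin and deliver the interrupt through the local
 * APIC(s) if it is newly asserted.  Returns 0 when the interrupt is
 * reported as coalesced.
 */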
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
                int irq_level, bool line_status)
{
        union kvm_ioapic_redirect_entry entry;
        u32 mask = 1 << irq;
        u32 old_irr;
        int edge, ret;

        entry = ioapic->redirtbl[irq];
        edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

        if (!irq_level) {
                ioapic->irr &= ~mask;
                ret = 1;
                goto out;
        }

        /*
         * Return 0 for coalesced interrupts; for edge-triggered interrupts,
         * this only happens if a previous edge has not been delivered due
         * to masking.  For level interrupts, the remote_irr field tells
         * us if the interrupt is waiting for an EOI.
         *
         * RTC is special: it is edge-triggered, but userspace likes to know
         * if it has been already ack-ed via EOI because coalesced RTC
         * interrupts lead to time drift in Windows guests.  So we track
         * EOI manually for the RTC interrupt.
         */
        if (irq == RTC_GSI && line_status &&
                rtc_irq_check_coalesced(ioapic)) {
                ret = 0;
                goto out;
        }

        old_irr = ioapic->irr;
        ioapic->irr |= mask;
        if (edge)
                ioapic->irr_delivered &= ~mask;
        if ((edge && old_irr == ioapic->irr) ||
            (!edge && entry.fields.remote_irr)) {
                ret = 0;
                goto out;
        }

        ret = ioapic_service(ioapic, irq, line_status);

out:
        trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
        return ret;
}

static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
        u32 idx;

        rtc_irq_eoi_tracking_reset(ioapic);
        for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
                ioapic_set_irq(ioapic, idx, 1, true);

        kvm_rtc_eoi_tracking_restore_all(ioapic);
}
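
/*
 * Recompute the bitmap of vectors present in the redirection table so
 * that EOIs for vectors the IOAPIC does not use can be filtered out
 * without scanning the table.
 */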
static void update_handled_vectors(struct kvm_ioapic *ioapic)
{
        DECLARE_BITMAP(handled_vectors, 256);
        int i;

        memset(handled_vectors, 0, sizeof(handled_vectors));
        for (i = 0; i < IOAPIC_NUM_PINS; ++i)
                __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
        memcpy(ioapic->handled_vectors, handled_vectors,
               sizeof(handled_vectors));
        smp_wmb();
}

void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
                           u32 *tmr)
{
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
        union kvm_ioapic_redirect_entry *e;
        int index;

        spin_lock(&ioapic->lock);
        for (index = 0; index < IOAPIC_NUM_PINS; index++) {
                e = &ioapic->redirtbl[index];
                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
                    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
                    index == RTC_GSI) {
                        if (kvm_apic_match_dest(vcpu, NULL, 0,
                                e->fields.dest_id, e->fields.dest_mode)) {
                                __set_bit(e->fields.vector,
                                          (unsigned long *)eoi_exit_bitmap);
                                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG)
                                        __set_bit(e->fields.vector,
                                                  (unsigned long *)tmr);
                        }
                }
        }
        spin_unlock(&ioapic->lock);
}

void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        if (!ioapic)
                return;
        kvm_make_scan_ioapic_request(kvm);
}
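
/*
 * Handle a write to the register selected via IOREGSEL.  Updates to a
 * redirection entry clear remote_irr when the low dword is written, fire
 * the mask notifiers if the mask bit changed, and re-deliver a pending
 * level-triggered interrupt for that pin.
 */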
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
        unsigned index;
        bool mask_before, mask_after;
        union kvm_ioapic_redirect_entry *e;

        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
                /* Writes are ignored. */
                break;

        case IOAPIC_REG_APIC_ID:
                ioapic->id = (val >> 24) & 0xf;
                break;

        case IOAPIC_REG_ARB_ID:
                break;

        default:
                index = (ioapic->ioregsel - 0x10) >> 1;

                ioapic_debug("change redir index %x val %x\n", index, val);
                if (index >= IOAPIC_NUM_PINS)
                        return;
                e = &ioapic->redirtbl[index];
                mask_before = e->fields.mask;
                if (ioapic->ioregsel & 1) {
                        e->bits &= 0xffffffff;
                        e->bits |= (u64) val << 32;
                } else {
                        e->bits &= ~0xffffffffULL;
                        e->bits |= (u32) val;
                        e->fields.remote_irr = 0;
                }
                update_handled_vectors(ioapic);
                mask_after = e->fields.mask;
                if (mask_before != mask_after)
                        kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
                    && ioapic->irr & (1 << index))
                        ioapic_service(ioapic, index, false);
                kvm_vcpu_request_scan_ioapic(ioapic->kvm);
                break;
        }
}
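
/*
 * Deliver the interrupt described by the pin's redirection entry to the
 * local APIC(s).  Returns -1 if the entry is masked; for level-triggered
 * interrupts that were accepted, remote_irr is set until the guest EOIs.
 */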
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
        union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
        struct kvm_lapic_irq irqe;
        int ret;

        if (entry->fields.mask)
                return -1;

        ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
                     "vector=%x trig_mode=%x\n",
                     entry->fields.dest_id, entry->fields.dest_mode,
                     entry->fields.delivery_mode, entry->fields.vector,
                     entry->fields.trig_mode);

        irqe.dest_id = entry->fields.dest_id;
        irqe.vector = entry->fields.vector;
        irqe.dest_mode = entry->fields.dest_mode;
        irqe.trig_mode = entry->fields.trig_mode;
        irqe.delivery_mode = entry->fields.delivery_mode << 8;
        irqe.level = 1;
        irqe.shorthand = 0;
        irqe.msi_redir_hint = false;

        if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
                ioapic->irr_delivered |= 1 << irq;

        if (irq == RTC_GSI && line_status) {
                /*
                 * pending_eoi cannot ever become negative (see
                 * rtc_status_pending_eoi_check_valid) and the caller
                 * ensures that it is only called if it is >= zero, namely
                 * if rtc_irq_check_coalesced returns false.
                 */
                BUG_ON(ioapic->rtc_status.pending_eoi != 0);
                ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
                                               ioapic->rtc_status.dest_map);
                ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
        } else
                ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

        if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
                entry->fields.remote_irr = 1;

        return ret;
}

int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
                       int level, bool line_status)
{
        int ret, irq_level;

        BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

        spin_lock(&ioapic->lock);
        irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
                                         irq_source_id, level);
        ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

        spin_unlock(&ioapic->lock);

        return ret;
}

void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
        int i;

        spin_lock(&ioapic->lock);
        for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
                __clear_bit(irq_source_id, &ioapic->irq_states[i]);
        spin_unlock(&ioapic->lock);
}
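
/*
 * Delayed work used to throttle an interrupt storm: re-deliver any
 * level-triggered interrupts that are still pending in the IRR and are
 * not waiting for an EOI.
 */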
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
        int i;
        struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
                                                 eoi_inject.work);
        spin_lock(&ioapic->lock);
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

                if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
                        continue;

                if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
                        ioapic_service(ioapic, i, false);
        }
        spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
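
/*
 * Handle an EOI for the given vector: run the ack notifiers, clear
 * remote_irr and, for a still-asserted level-triggered line, deliver the
 * interrupt again.  After IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT back-to-back
 * re-deliveries the next injection is deferred to the eoi_inject delayed
 * work, giving a buggy guest a chance to make progress.
 */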
static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
                        struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
        int i;
        struct kvm_lapic *apic = vcpu->arch.apic;

        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

                if (ent->fields.vector != vector)
                        continue;

                if (i == RTC_GSI)
                        rtc_irq_eoi(ioapic, vcpu);
                /*
                 * We are dropping the lock while calling the ack notifiers
                 * because the ack notifier callbacks for assigned devices
                 * call into the IOAPIC recursively.  Since remote_irr is
                 * cleared only after the notifiers return, a vector that is
                 * delivered again while the lock is dropped will be put into
                 * the IRR and delivered once the ack notifier has returned.
                 */
                spin_unlock(&ioapic->lock);
                kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
                spin_lock(&ioapic->lock);

                if (trigger_mode != IOAPIC_LEVEL_TRIG ||
                    kvm_apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
                        continue;

                ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
                ent->fields.remote_irr = 0;
                if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
                        ++ioapic->irq_eoi[i];
                        if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
                                /*
                                 * Real hardware does not deliver the interrupt
                                 * immediately during eoi broadcast, and this
                                 * lets a buggy guest make slow progress
                                 * even if it does not correctly handle a
                                 * level-triggered interrupt.  Emulate this
                                 * behavior if we detect an interrupt storm.
                                 */
                                schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
                                ioapic->irq_eoi[i] = 0;
                                trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
                        } else {
                                ioapic_service(ioapic, i, false);
                        }
                } else {
                        ioapic->irq_eoi[i] = 0;
                }
        }
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        __kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
        spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
        return ((addr >= ioapic->base_address &&
                 (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
}
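
/*
 * MMIO handlers for the IOAPIC window.  IOAPIC_REG_SELECT accesses hit
 * ioregsel directly; IOAPIC_REG_WINDOW accesses are forwarded to the
 * indirect read/write helpers above.
 */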
static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                            gpa_t addr, int len, void *val)
{
        struct kvm_ioapic *ioapic = to_ioapic(this);
        u32 result;

        if (!ioapic_in_range(ioapic, addr))
                return -EOPNOTSUPP;

        ioapic_debug("addr %lx\n", (unsigned long)addr);
        ASSERT(!(addr & 0xf));  /* check alignment */

        addr &= 0xff;
        spin_lock(&ioapic->lock);
        switch (addr) {
        case IOAPIC_REG_SELECT:
                result = ioapic->ioregsel;
                break;

        case IOAPIC_REG_WINDOW:
                result = ioapic_read_indirect(ioapic, addr, len);
                break;

        default:
                result = 0;
                break;
        }
        spin_unlock(&ioapic->lock);

        switch (len) {
        case 8:
                *(u64 *) val = result;
                break;
        case 1:
        case 2:
        case 4:
                memcpy(val, (char *)&result, len);
                break;
        default:
                printk(KERN_WARNING "ioapic: wrong length %d\n", len);
        }
        return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                             gpa_t addr, int len, const void *val)
{
        struct kvm_ioapic *ioapic = to_ioapic(this);
        u32 data;

        if (!ioapic_in_range(ioapic, addr))
                return -EOPNOTSUPP;

        ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
                     (void *)addr, len, val);
        ASSERT(!(addr & 0xf));  /* check alignment */

        switch (len) {
        case 8:
        case 4:
                data = *(u32 *) val;
                break;
        case 2:
                data = *(u16 *) val;
                break;
        case 1:
                data = *(u8 *) val;
                break;
        default:
                printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
                return 0;
        }

        addr &= 0xff;
        spin_lock(&ioapic->lock);
        switch (addr) {
        case IOAPIC_REG_SELECT:
                ioapic->ioregsel = data & 0xFF; /* 8-bit register */
                break;

        case IOAPIC_REG_WINDOW:
                ioapic_write_indirect(ioapic, data);
                break;

        default:
                break;
        }
        spin_unlock(&ioapic->lock);
        return 0;
}

static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
        int i;

        cancel_delayed_work_sync(&ioapic->eoi_inject);
        for (i = 0; i < IOAPIC_NUM_PINS; i++)
                ioapic->redirtbl[i].fields.mask = 1;
        ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
        ioapic->ioregsel = 0;
        ioapic->irr = 0;
        ioapic->irr_delivered = 0;
        ioapic->id = 0;
        memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
        rtc_irq_eoi_tracking_reset(ioapic);
        update_handled_vectors(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
        .read  = ioapic_mmio_read,
        .write = ioapic_mmio_write,
};
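
/*
 * Allocate the in-kernel IOAPIC, reset it to its power-on state and
 * register it on the MMIO bus at the default base address.
 */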
int kvm_ioapic_init(struct kvm *kvm)
{
        struct kvm_ioapic *ioapic;
        int ret;

        ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
        if (!ioapic)
                return -ENOMEM;
        spin_lock_init(&ioapic->lock);
        INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
        kvm->arch.vioapic = ioapic;
        kvm_ioapic_reset(ioapic);
        kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
        ioapic->kvm = kvm;
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
                                      IOAPIC_MEM_LENGTH, &ioapic->dev);
        mutex_unlock(&kvm->slots_lock);
        if (ret < 0) {
                kvm->arch.vioapic = NULL;
                kfree(ioapic);
        }

        return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        cancel_delayed_work_sync(&ioapic->eoi_inject);
        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
        kvm->arch.vioapic = NULL;
        kfree(ioapic);
}
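
/*
 * Save/restore the IOAPIC register state for userspace.  On save,
 * already-delivered edge-triggered interrupts are hidden from the IRR;
 * on restore, the IRR is replayed through kvm_ioapic_inject_all() so that
 * remote_irr and the RTC EOI tracking are recomputed.
 */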
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
        struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);

        if (!ioapic)
                return -EINVAL;

        spin_lock(&ioapic->lock);
        memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
        state->irr &= ~ioapic->irr_delivered;
        spin_unlock(&ioapic->lock);
        return 0;
}

int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
        struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);

        if (!ioapic)
                return -EINVAL;

        spin_lock(&ioapic->lock);
        memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
        ioapic->irr = 0;
        ioapic->irr_delivered = 0;
        update_handled_vectors(ioapic);
        kvm_vcpu_request_scan_ioapic(kvm);
        kvm_ioapic_inject_all(ioapic, state->irr);
        spin_unlock(&ioapic->lock);
        return 0;
}