x86.h

#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.exception.pending = false;
}

static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
                                       bool soft)
{
        vcpu->arch.interrupt.pending = true;
        vcpu->arch.interrupt.soft = soft;
        vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
        vcpu->arch.interrupt.pending = false;
}

static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
               vcpu->arch.nmi_injected;
}

static inline bool kvm_exception_is_soft(unsigned int nr)
{
        return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->arch.efer & EFER_LMA;
#else
        return 0;
#endif
}

static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
{
        int cs_db, cs_l;

        if (!is_long_mode(vcpu))
                return false;
        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
        return cs_l;
}

static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

static inline u32 bit(int bitno)
{
        return 1 << (bitno & 31);
}

static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
                                        gva_t gva, gfn_t gfn, unsigned access)
{
        vcpu->arch.mmio_gva = gva & PAGE_MASK;
        vcpu->arch.access = access;
        vcpu->arch.mmio_gfn = gfn;
        vcpu->arch.mmio_gen = kvm_memslots(vcpu->kvm)->generation;
}

static inline bool vcpu_match_mmio_gen(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.mmio_gen == kvm_memslots(vcpu->kvm)->generation;
}

/*
 * Clear the mmio cache info for the given gva. If gva is MMIO_GVA_ANY, we
 * clear all mmio cache info.
 */
#define MMIO_GVA_ANY (~(gva_t)0)

static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
        if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
                return;

        vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gva &&
            vcpu->arch.mmio_gva == (gva & PAGE_MASK))
                return true;

        return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        if (vcpu_match_mmio_gen(vcpu) && vcpu->arch.mmio_gfn &&
            vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
                return true;

        return false;
}

static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
                                               enum kvm_reg reg)
{
        unsigned long val = kvm_register_read(vcpu, reg);

        return is_64_bit_mode(vcpu) ? val : (u32)val;
}

static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
                                       enum kvm_reg reg,
                                       unsigned long val)
{
        if (!is_64_bit_mode(vcpu))
                val = (u32)val;
        return kvm_register_write(vcpu, reg, val);
}

void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
        gva_t addr, void *val, unsigned int bytes,
        struct x86_exception *exception);

bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data);

#define KVM_SUPPORTED_XCR0     (XSTATE_FP | XSTATE_SSE | XSTATE_YMM \
                                | XSTATE_BNDREGS | XSTATE_BNDCSR)
extern u64 host_xcr0;

extern u64 kvm_supported_xcr0(void);

extern unsigned int min_timer_period_us;

extern struct static_key kvm_no_apic_vcpu;

#endif
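
A minimal sketch of how the MMIO-cache helpers above fit together: cache a translation after a slow page walk, reuse it on a repeated access to the same gva, and drop it when guest mappings change. The caller names below (example_handle_mmio_access, example_invalidate) are hypothetical illustrations, not part of x86.h or the KVM call graph.

/* Illustrative only: hypothetical callers of the vcpu_*_mmio_* helpers. */
static int example_handle_mmio_access(struct kvm_vcpu *vcpu, gva_t gva,
                                      gfn_t gfn, unsigned access)
{
        /* Fast path: the last cached MMIO translation still matches this gva
         * and the current memslot generation. */
        if (vcpu_match_mmio_gva(vcpu, gva))
                return 1;

        /* Slow path: after walking the page tables (gfn/access resolved by
         * the caller here), remember the result for the next access. */
        vcpu_cache_mmio_info(vcpu, gva, gfn, access);
        return 0;
}

static void example_invalidate(struct kvm_vcpu *vcpu)
{
        /* On events that invalidate guest mappings, drop every cached
         * MMIO translation rather than a single gva. */
        vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
}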