/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
  11. #include <linux/errno.h>
  12. #include <linux/err.h>
  13. #include <linux/kvm_host.h>
  14. #include <linux/module.h>
  15. #include <linux/vmalloc.h>
  16. #include <linux/fs.h>
  17. #include <linux/bootmem.h>
  18. #include <asm/cacheflush.h>
  19. #include "kvm_mips_comm.h"
  20. #define SYNCI_TEMPLATE 0x041f0000
  21. #define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
  22. #define SYNCI_OFFSET ((x) & 0xffff)
  23. #define LW_TEMPLATE 0x8c000000
  24. #define CLEAR_TEMPLATE 0x00000020
  25. #define SW_TEMPLATE 0xac000000
  26. int
  27. kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
  28. struct kvm_vcpu *vcpu)
  29. {
  30. int result = 0;
  31. unsigned long kseg0_opc;
  32. uint32_t synci_inst = 0x0;
  33. /* Replace the CACHE instruction, with a NOP */
  34. kseg0_opc =
  35. CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
  36. (vcpu, (unsigned long) opc));
  37. memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
  38. local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
  39. return result;
  40. }
  41. /*
  42. * Address based CACHE instructions are transformed into synci(s). A little heavy
  43. * for just D-cache invalidates, but avoids an expensive trap
  44. */
  45. int
  46. kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
  47. struct kvm_vcpu *vcpu)
  48. {
  49. int result = 0;
  50. unsigned long kseg0_opc;
  51. uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
  52. base = (inst >> 21) & 0x1f;
  53. offset = inst & 0xffff;
  54. synci_inst |= (base << 21);
  55. synci_inst |= offset;
  56. kseg0_opc =
  57. CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
  58. (vcpu, (unsigned long) opc));
  59. memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
  60. local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
  61. return result;
  62. }
  63. int
  64. kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
  65. {
  66. int32_t rt, rd, sel;
  67. uint32_t mfc0_inst;
  68. unsigned long kseg0_opc, flags;
  69. rt = (inst >> 16) & 0x1f;
  70. rd = (inst >> 11) & 0x1f;
  71. sel = inst & 0x7;
  72. if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
  73. mfc0_inst = CLEAR_TEMPLATE;
  74. mfc0_inst |= ((rt & 0x1f) << 16);
  75. } else {
  76. mfc0_inst = LW_TEMPLATE;
  77. mfc0_inst |= ((rt & 0x1f) << 16);
  78. mfc0_inst |=
  79. offsetof(struct mips_coproc,
  80. reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
  81. cop0);
  82. }
  83. if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
  84. kseg0_opc =
  85. CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
  86. (vcpu, (unsigned long) opc));
  87. memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
  88. local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
  89. } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
  90. local_irq_save(flags);
  91. memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
  92. local_flush_icache_range((unsigned long)opc,
  93. (unsigned long)opc + 32);
  94. local_irq_restore(flags);
  95. } else {
  96. kvm_err("%s: Invalid address: %p\n", __func__, opc);
  97. return -EFAULT;
  98. }
  99. return 0;
  100. }
  101. int
  102. kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
  103. {
  104. int32_t rt, rd, sel;
  105. uint32_t mtc0_inst = SW_TEMPLATE;
  106. unsigned long kseg0_opc, flags;
  107. rt = (inst >> 16) & 0x1f;
  108. rd = (inst >> 11) & 0x1f;
  109. sel = inst & 0x7;
  110. mtc0_inst |= ((rt & 0x1f) << 16);
  111. mtc0_inst |=
  112. offsetof(struct mips_coproc,
  113. reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
  114. if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
  115. kseg0_opc =
  116. CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
  117. (vcpu, (unsigned long) opc));
  118. memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
  119. local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
  120. } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
  121. local_irq_save(flags);
  122. memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
  123. local_flush_icache_range((unsigned long)opc,
  124. (unsigned long)opc + 32);
  125. local_irq_restore(flags);
  126. } else {
  127. kvm_err("%s: Invalid address: %p\n", __func__, opc);
  128. return -EFAULT;
  129. }
  130. return 0;
  131. }