dyntrans.c 4.0 KB
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>

#include "commpage.h"
  21. /**
  22. * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
  23. * @vcpu: Virtual CPU.
  24. * @opc: PC of instruction to replace.
  25. * @replace: Instruction to write
  26. */
  27. static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
  28. union mips_instruction replace)
  29. {
  30. unsigned long vaddr = (unsigned long)opc;
  31. int err;
  32. retry:
  33. /* The GVA page table is still active so use the Linux TLB handlers */
  34. kvm_trap_emul_gva_lockless_begin(vcpu);
  35. err = put_user(replace.word, opc);
  36. kvm_trap_emul_gva_lockless_end(vcpu);
  37. if (unlikely(err)) {
  38. /*
  39. * We write protect clean pages in GVA page table so normal
  40. * Linux TLB mod handler doesn't silently dirty the page.
  41. * Its also possible we raced with a GVA invalidation.
  42. * Try to force the page to become dirty.
  43. */
  44. err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
  45. if (unlikely(err)) {
  46. kvm_info("%s: Address unwriteable: %p\n",
  47. __func__, opc);
  48. return -EFAULT;
  49. }
  50. /*
  51. * Try again. This will likely trigger a TLB refill, which will
  52. * fetch the new dirty entry from the GVA page table, which
  53. * should then succeed.
  54. */
  55. goto retry;
  56. }
  57. __local_flush_icache_user_range(vaddr, vaddr + 4);
  58. return 0;
  59. }
  60. int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
  61. struct kvm_vcpu *vcpu)
  62. {
  63. union mips_instruction nop_inst = { 0 };
  64. /* Replace the CACHE instruction, with a NOP */
  65. return kvm_mips_trans_replace(vcpu, opc, nop_inst);
  66. }
  67. /*
  68. * Address based CACHE instructions are transformed into synci(s). A little
  69. * heavy for just D-cache invalidates, but avoids an expensive trap
  70. */
  71. int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
  72. struct kvm_vcpu *vcpu)
  73. {
  74. union mips_instruction synci_inst = { 0 };
  75. synci_inst.i_format.opcode = bcond_op;
  76. synci_inst.i_format.rs = inst.i_format.rs;
  77. synci_inst.i_format.rt = synci_op;
  78. if (cpu_has_mips_r6)
  79. synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
  80. else
  81. synci_inst.i_format.simmediate = inst.i_format.simmediate;
  82. return kvm_mips_trans_replace(vcpu, opc, synci_inst);
  83. }
  84. int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
  85. struct kvm_vcpu *vcpu)
  86. {
  87. union mips_instruction mfc0_inst = { 0 };
  88. u32 rd, sel;
  89. rd = inst.c0r_format.rd;
  90. sel = inst.c0r_format.sel;
  91. if (rd == MIPS_CP0_ERRCTL && sel == 0) {
  92. mfc0_inst.r_format.opcode = spec_op;
  93. mfc0_inst.r_format.rd = inst.c0r_format.rt;
  94. mfc0_inst.r_format.func = add_op;
  95. } else {
  96. mfc0_inst.i_format.opcode = lw_op;
  97. mfc0_inst.i_format.rt = inst.c0r_format.rt;
  98. mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
  99. offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
  100. #ifdef CONFIG_CPU_BIG_ENDIAN
  101. if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
  102. mfc0_inst.i_format.simmediate |= 4;
  103. #endif
  104. }
  105. return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
  106. }
  107. int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
  108. struct kvm_vcpu *vcpu)
  109. {
  110. union mips_instruction mtc0_inst = { 0 };
  111. u32 rd, sel;
  112. rd = inst.c0r_format.rd;
  113. sel = inst.c0r_format.sel;
  114. mtc0_inst.i_format.opcode = sw_op;
  115. mtc0_inst.i_format.rt = inst.c0r_format.rt;
  116. mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
  117. offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
  118. #ifdef CONFIG_CPU_BIG_ENDIAN
  119. if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
  120. mtc0_inst.i_format.simmediate |= 4;
  121. #endif
  122. return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
  123. }