/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * FPU context handling code for KVM.
 *
 * Copyright (C) 2015 Imagination Technologies Ltd.
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/regdef.h>

        .set    noreorder
        .set    noat

LEAF(__kvm_save_fpu)
        .set    push
        .set    mips64r2
        SET_HARDFLOAT
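        /*
         * Status.FR is bit 26 of CP0_STATUS; shifting it left by 5 moves it
         * into the sign bit so bgez can branch when FR is clear.
         */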
        mfc0    t0, CP0_STATUS
        sll     t0, t0, 5               # is Status.FR set?
        bgez    t0, 1f                  # no: skip odd doubles
         nop
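        /*
         * With Status.FR=1 the odd-numbered FP registers are distinct 64-bit
         * registers; with FR=0 they are only the upper halves of the even/odd
         * pairs, so the even-numbered stores below already capture them.
         */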
        sdc1    $f1,  VCPU_FPR1(a0)
        sdc1    $f3,  VCPU_FPR3(a0)
        sdc1    $f5,  VCPU_FPR5(a0)
        sdc1    $f7,  VCPU_FPR7(a0)
        sdc1    $f9,  VCPU_FPR9(a0)
        sdc1    $f11, VCPU_FPR11(a0)
        sdc1    $f13, VCPU_FPR13(a0)
        sdc1    $f15, VCPU_FPR15(a0)
        sdc1    $f17, VCPU_FPR17(a0)
        sdc1    $f19, VCPU_FPR19(a0)
        sdc1    $f21, VCPU_FPR21(a0)
        sdc1    $f23, VCPU_FPR23(a0)
        sdc1    $f25, VCPU_FPR25(a0)
        sdc1    $f27, VCPU_FPR27(a0)
        sdc1    $f29, VCPU_FPR29(a0)
        sdc1    $f31, VCPU_FPR31(a0)

1:      sdc1    $f0,  VCPU_FPR0(a0)
        sdc1    $f2,  VCPU_FPR2(a0)
        sdc1    $f4,  VCPU_FPR4(a0)
        sdc1    $f6,  VCPU_FPR6(a0)
        sdc1    $f8,  VCPU_FPR8(a0)
        sdc1    $f10, VCPU_FPR10(a0)
        sdc1    $f12, VCPU_FPR12(a0)
        sdc1    $f14, VCPU_FPR14(a0)
        sdc1    $f16, VCPU_FPR16(a0)
        sdc1    $f18, VCPU_FPR18(a0)
        sdc1    $f20, VCPU_FPR20(a0)
        sdc1    $f22, VCPU_FPR22(a0)
        sdc1    $f24, VCPU_FPR24(a0)
        sdc1    $f26, VCPU_FPR26(a0)
        sdc1    $f28, VCPU_FPR28(a0)
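        /* The final store executes in the jr delay slot (.set noreorder). */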
        jr      ra
         sdc1   $f30, VCPU_FPR30(a0)
        .set    pop
        END(__kvm_save_fpu)

LEAF(__kvm_restore_fpu)
        .set    push
        .set    mips64r2
        SET_HARDFLOAT
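        /* Same Status.FR check as in __kvm_save_fpu above. */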
        mfc0    t0, CP0_STATUS
        sll     t0, t0, 5               # is Status.FR set?
        bgez    t0, 1f                  # no: skip odd doubles
         nop

        ldc1    $f1,  VCPU_FPR1(a0)
        ldc1    $f3,  VCPU_FPR3(a0)
        ldc1    $f5,  VCPU_FPR5(a0)
        ldc1    $f7,  VCPU_FPR7(a0)
        ldc1    $f9,  VCPU_FPR9(a0)
        ldc1    $f11, VCPU_FPR11(a0)
        ldc1    $f13, VCPU_FPR13(a0)
        ldc1    $f15, VCPU_FPR15(a0)
        ldc1    $f17, VCPU_FPR17(a0)
        ldc1    $f19, VCPU_FPR19(a0)
        ldc1    $f21, VCPU_FPR21(a0)
        ldc1    $f23, VCPU_FPR23(a0)
        ldc1    $f25, VCPU_FPR25(a0)
        ldc1    $f27, VCPU_FPR27(a0)
        ldc1    $f29, VCPU_FPR29(a0)
        ldc1    $f31, VCPU_FPR31(a0)

1:      ldc1    $f0,  VCPU_FPR0(a0)
        ldc1    $f2,  VCPU_FPR2(a0)
        ldc1    $f4,  VCPU_FPR4(a0)
        ldc1    $f6,  VCPU_FPR6(a0)
        ldc1    $f8,  VCPU_FPR8(a0)
        ldc1    $f10, VCPU_FPR10(a0)
        ldc1    $f12, VCPU_FPR12(a0)
        ldc1    $f14, VCPU_FPR14(a0)
        ldc1    $f16, VCPU_FPR16(a0)
        ldc1    $f18, VCPU_FPR18(a0)
        ldc1    $f20, VCPU_FPR20(a0)
        ldc1    $f22, VCPU_FPR22(a0)
        ldc1    $f24, VCPU_FPR24(a0)
        ldc1    $f26, VCPU_FPR26(a0)
        ldc1    $f28, VCPU_FPR28(a0)
        jr      ra
         ldc1   $f30, VCPU_FPR30(a0)
        .set    pop
        END(__kvm_restore_fpu)

LEAF(__kvm_restore_fcsr)
        .set    push
        SET_HARDFLOAT
        lw      t0, VCPU_FCR31(a0)
        /*
         * The ctc1 must stay at this offset in __kvm_restore_fcsr.
         * See kvm_mips_csr_die_notify() which handles t0 containing a value
         * which triggers an FP Exception, which must be stepped over and
         * ignored since the set cause bits must remain there for the guest.
         */
        ctc1    t0, fcr31
        jr      ra
         nop
        .set    pop
        END(__kvm_restore_fcsr)