psci.c

/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */
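
/*
 * Calling convention as implemented below: the guest issues an HVC with
 * the PSCI function ID in r0 (x0 for an AArch64 guest); for CPU_ON the
 * target MPIDR is passed in r1 and the entry point address in r2. The
 * PSCI return value is written back to r0 before the guest is resumed.
 */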
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pause = true;
}
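
/*
 * PSCI CPU_ON: resolve the target VCPU from the MPIDR passed in r1 of the
 * calling VCPU, check that it exists and is currently powered off, reset
 * it, set its entry point from r2 (honouring a Thumb entry point and the
 * caller's endianness), then clear its pause flag and wake it up.
 */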
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL, *tmp;
	wait_queue_head_t *wq;
	unsigned long cpu_id;
	unsigned long mpidr;
	phys_addr_t target_pc;
	int i;

	cpu_id = *vcpu_reg(source_vcpu, 1);
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr(tmp);
		if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
			vcpu = tmp;
			break;
		}
	}

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu || !vcpu->arch.pause)
		return PSCI_RET_INVALID_PARAMS;

	target_pc = *vcpu_reg(source_vcpu, 2);

	kvm_reset_vcpu(vcpu);

	/* Gracefully handle Thumb2 entry point */
	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
		target_pc &= ~((phys_addr_t) 1);
		vcpu_set_thumb(vcpu);
	}

	/* Propagate caller endianness */
	if (kvm_vcpu_is_be(source_vcpu))
		kvm_vcpu_set_be(vcpu);

	*vcpu_pc(vcpu) = target_pc;
	vcpu->arch.pause = false;
	smp_mb();		/* Make sure the above is visible */

	wq = kvm_arch_vcpu_wq(vcpu);
	wake_up_interruptible(wq);

	return PSCI_RET_SUCCESS;
}
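
/*
 * Report which PSCI revision is exposed to this guest: v0.2 if userspace
 * enabled the KVM_ARM_VCPU_PSCI_0_2 VCPU feature, otherwise the legacy
 * KVM-specific v0.1 interface.
 */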
int kvm_psci_version(struct kvm_vcpu *vcpu)
{
	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
		return KVM_ARM_PSCI_0_2;

	return KVM_ARM_PSCI_0_1;
}
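
/*
 * Handle the PSCI v0.2 function IDs. Only PSCI_VERSION, CPU_OFF and CPU_ON
 * (32-bit and 64-bit variants) are implemented here; the remaining v0.2
 * functions are answered with PSCI_RET_NOT_SUPPORTED. Function IDs outside
 * the v0.2 range fall through to the default case and the call is reported
 * as unhandled.
 */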
static bool kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0] = Minor Version = 2
		 */
		val = 2;
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN_MIGRATE:
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
	case PSCI_0_2_FN64_CPU_SUSPEND:
	case PSCI_0_2_FN64_AFFINITY_INFO:
	case PSCI_0_2_FN64_MIGRATE:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	default:
		return false;
	}

	*vcpu_reg(vcpu, 0) = val;
	return true;
}
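
/*
 * Handle the legacy KVM-specific PSCI v0.1 function IDs (KVM_PSCI_FN_*),
 * kept for guests that predate PSCI v0.2 support.
 */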
static bool kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		val = kvm_psci_vcpu_on(vcpu);
		break;
	case KVM_PSCI_FN_CPU_SUSPEND:
	case KVM_PSCI_FN_MIGRATE:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	default:
		return false;
	}

	*vcpu_reg(vcpu, 0) = val;
	return true;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world, where
 * the function number is placed in r0. This function returns true if the
 * function number specified in r0 is within the PSCI range, and false
 * otherwise.
 */
bool kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu)) {
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return false;
	}
}