psci.c

/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

#define AFFINITY_MASK(level)    ~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)
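
/*
 * Return the MPIDR bits covering the affinity fields at @affinity_level
 * and above, or 0 if @affinity_level is above the architectural maximum (3).
 */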
static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
        if (affinity_level <= 3)
                return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

        return 0;
}

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
        /*
         * NOTE: For simplicity, we emulate VCPU suspend the same way as
         * WFI (Wait-for-interrupt) emulation.
         *
         * This means that for KVM the wakeup events are interrupts, which
         * is consistent with the intended use of StateID as described in
         * section 5.4.1 of the PSCI v0.2 specification (ARM DEN 0022A).
         *
         * Further, we also treat a power-down request the same as a
         * stand-by request, as per section 5.4.2 clause 3 of the PSCI v0.2
         * specification (ARM DEN 0022A). This means all suspend states
         * for KVM will preserve the register state.
         */
        kvm_vcpu_block(vcpu);

        return PSCI_RET_SUCCESS;
}
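
/*
 * CPU_OFF: mark the calling VCPU as paused so it is no longer run until
 * another VCPU turns it back on with CPU_ON.
 */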
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pause = true;
}
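
/*
 * CPU_ON: the target MPIDR is taken from r1/x1, the entry point from r2/x2
 * and the context ID from r3/x3 of the calling VCPU. The matching VCPU is
 * reset, its PC and r0/x0 are loaded, and it is then woken up.
 */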
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
        struct kvm *kvm = source_vcpu->kvm;
        struct kvm_vcpu *vcpu = NULL, *tmp;
        wait_queue_head_t *wq;
        unsigned long cpu_id;
        unsigned long context_id;
        unsigned long mpidr;
        phys_addr_t target_pc;
        int i;

        cpu_id = *vcpu_reg(source_vcpu, 1);
        if (vcpu_mode_is_32bit(source_vcpu))
                cpu_id &= ~((u32) 0);

        kvm_for_each_vcpu(i, tmp, kvm) {
                mpidr = kvm_vcpu_get_mpidr(tmp);
                if ((mpidr & MPIDR_HWID_BITMASK) == (cpu_id & MPIDR_HWID_BITMASK)) {
                        vcpu = tmp;
                        break;
                }
        }

        /*
         * Make sure the caller requested a valid CPU and that the CPU is
         * turned off.
         */
        if (!vcpu)
                return PSCI_RET_INVALID_PARAMS;
        if (!vcpu->arch.pause) {
                if (kvm_psci_version(source_vcpu) != KVM_ARM_PSCI_0_1)
                        return PSCI_RET_ALREADY_ON;
                else
                        return PSCI_RET_INVALID_PARAMS;
        }

        target_pc = *vcpu_reg(source_vcpu, 2);
        context_id = *vcpu_reg(source_vcpu, 3);

        kvm_reset_vcpu(vcpu);

        /* Gracefully handle Thumb2 entry point */
        if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
                target_pc &= ~((phys_addr_t) 1);
                vcpu_set_thumb(vcpu);
        }

        /* Propagate caller endianness */
        if (kvm_vcpu_is_be(source_vcpu))
                kvm_vcpu_set_be(vcpu);

        *vcpu_pc(vcpu) = target_pc;
        /*
         * NOTE: We always update r0 (or x0) because for PSCI v0.1
         * the general purpose registers are undefined upon CPU_ON.
         */
        *vcpu_reg(vcpu, 0) = context_id;
        vcpu->arch.pause = false;
        smp_mb();               /* Make sure the above is visible */

        wq = kvm_arch_vcpu_wq(vcpu);
        wake_up_interruptible(wq);

        return PSCI_RET_SUCCESS;
}
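
/*
 * AFFINITY_INFO: r1/x1 holds the target affinity value and r2/x2 the lowest
 * affinity level to consider. Reports whether any matching VCPU is running.
 */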
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
        int i;
        unsigned long mpidr;
        unsigned long target_affinity;
        unsigned long target_affinity_mask;
        unsigned long lowest_affinity_level;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        target_affinity = *vcpu_reg(vcpu, 1);
        lowest_affinity_level = *vcpu_reg(vcpu, 2);

        /* Determine target affinity mask */
        target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
        if (!target_affinity_mask)
                return PSCI_RET_INVALID_PARAMS;

        /* Ignore other bits of target affinity */
        target_affinity &= target_affinity_mask;

        /*
         * If at least one VCPU matching the target affinity is running,
         * report ON, otherwise report OFF.
         */
        kvm_for_each_vcpu(i, tmp, kvm) {
                mpidr = kvm_vcpu_get_mpidr(tmp);
                if (((mpidr & target_affinity_mask) == target_affinity) &&
                    !tmp->arch.pause) {
                        return PSCI_0_2_AFFINITY_LEVEL_ON;
                }
        }

        return PSCI_0_2_AFFINITY_LEVEL_OFF;
}
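
/*
 * Set up the run structure so that the next return to user space reports a
 * system-level event (shutdown or reset) instead of re-entering the guest.
 */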
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
        memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
        vcpu->run->system_event.type = type;
        vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
        kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}
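
/*
 * A guest sees PSCI v0.2 only if the KVM_ARM_VCPU_PSCI_0_2 feature bit was
 * set for the VCPU; otherwise it gets the legacy v0.1 interface.
 */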
int kvm_psci_version(struct kvm_vcpu *vcpu)
{
        if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
                return KVM_ARM_PSCI_0_2;

        return KVM_ARM_PSCI_0_1;
}
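
/*
 * Handle the PSCI v0.2 function set. The function ID arrives in r0/x0 and
 * the result is written back there. The return value tells the caller
 * whether to resume the guest (> 0), exit to user space (0), or fail with
 * an error (< 0).
 */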
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
        int ret = 1;
        unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
        unsigned long val;

        switch (psci_fn) {
        case PSCI_0_2_FN_PSCI_VERSION:
                /*
                 * Bits[31:16] = Major Version = 0
                 * Bits[15:0] = Minor Version = 2
                 */
                val = 2;
                break;
        case PSCI_0_2_FN_CPU_SUSPEND:
        case PSCI_0_2_FN64_CPU_SUSPEND:
                val = kvm_psci_vcpu_suspend(vcpu);
                break;
        case PSCI_0_2_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case PSCI_0_2_FN_CPU_ON:
        case PSCI_0_2_FN64_CPU_ON:
                val = kvm_psci_vcpu_on(vcpu);
                break;
        case PSCI_0_2_FN_AFFINITY_INFO:
        case PSCI_0_2_FN64_AFFINITY_INFO:
                val = kvm_psci_vcpu_affinity_info(vcpu);
                break;
        case PSCI_0_2_FN_MIGRATE:
        case PSCI_0_2_FN64_MIGRATE:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
                /*
                 * The Trusted OS is either MP (hence does not require
                 * migration) or not present.
                 */
                val = PSCI_0_2_TOS_MP;
                break;
        case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
        case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        case PSCI_0_2_FN_SYSTEM_OFF:
                kvm_psci_system_off(vcpu);
                /*
                 * We shouldn't be going back to the guest VCPU after
                 * receiving a SYSTEM_OFF request.
                 *
                 * If user space accidentally/deliberately resumes the
                 * guest VCPU after a SYSTEM_OFF request, then the guest
                 * VCPU should see an internal failure from the PSCI return
                 * value. To achieve this, we preload r0 (or x0) with the
                 * PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        case PSCI_0_2_FN_SYSTEM_RESET:
                kvm_psci_system_reset(vcpu);
                /*
                 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
                 * with the PSCI return value INTERNAL_FAILURE.
                 */
                val = PSCI_RET_INTERNAL_FAILURE;
                ret = 0;
                break;
        default:
                return -EINVAL;
        }

        *vcpu_reg(vcpu, 0) = val;
        return ret;
}
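
/*
 * Handle the legacy KVM-specific PSCI v0.1 function set: only CPU_OFF and
 * CPU_ON are implemented; CPU_SUSPEND and MIGRATE report NOT_SUPPORTED.
 */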
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
        unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
        unsigned long val;

        switch (psci_fn) {
        case KVM_PSCI_FN_CPU_OFF:
                kvm_psci_vcpu_off(vcpu);
                val = PSCI_RET_SUCCESS;
                break;
        case KVM_PSCI_FN_CPU_ON:
                val = kvm_psci_vcpu_on(vcpu);
                break;
        case KVM_PSCI_FN_CPU_SUSPEND:
        case KVM_PSCI_FN_MIGRATE:
                val = PSCI_RET_NOT_SUPPORTED;
                break;
        default:
                return -EINVAL;
        }

        *vcpu_reg(vcpu, 0) = val;
        return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
int kvm_psci_call(struct kvm_vcpu *vcpu)
{
        switch (kvm_psci_version(vcpu)) {
        case KVM_ARM_PSCI_0_2:
                return kvm_psci_0_2_call(vcpu);
        case KVM_ARM_PSCI_0_1:
                return kvm_psci_0_1_call(vcpu);
        default:
                return -EINVAL;
        }
}