@@ -85,6 +85,23 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
 	return PSCI_RET_SUCCESS;
 }
 
+static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
+{
+	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
+	vcpu->run->system_event.type = type;
+	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
+{
+	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
+}
+
+static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
+{
+	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
+}
+
 int kvm_psci_version(struct kvm_vcpu *vcpu)
 {
 	if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
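
The helpers above only record the event: they clear and fill vcpu->run->system_event and set the exit reason to KVM_EXIT_SYSTEM_EVENT, so the actual shutdown or reset is left to user space once KVM_RUN returns. The following is a minimal sketch of that user-space side, assuming a generic VMM run loop; vcpu_fd and the mmap'ed run structure come from the usual KVM vCPU setup, and these names are illustrative, not part of this patch.

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Sketch only: vcpu_fd is the fd from KVM_CREATE_VCPU and run is the
 * kvm_run region mmap'ed from it. Returns 0 when the VM should stop.
 */
static int run_vcpu_once(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	if (run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
		switch (run->system_event.type) {
		case KVM_SYSTEM_EVENT_SHUTDOWN:
			/* Guest issued PSCI SYSTEM_OFF: power the VM down. */
			printf("guest requested shutdown\n");
			return 0;
		case KVM_SYSTEM_EVENT_RESET:
			/* Guest issued PSCI SYSTEM_RESET: reset and restart. */
			printf("guest requested reset\n");
			return 0;
		}
	}

	return 1;	/* any other exit: handle elsewhere and keep running */
}
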
@@ -95,6 +112,7 @@ int kvm_psci_version(struct kvm_vcpu *vcpu)
 
 static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 {
+	int ret = 1;
 	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
 	unsigned long val;
 
@@ -114,13 +132,35 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 	case PSCI_0_2_FN64_CPU_ON:
 		val = kvm_psci_vcpu_on(vcpu);
 		break;
+	case PSCI_0_2_FN_SYSTEM_OFF:
+		kvm_psci_system_off(vcpu);
+		/*
+		 * We shouldn't be going back to the guest VCPU after
+		 * receiving a SYSTEM_OFF request.
+		 *
+		 * If user space accidentally/deliberately resumes the
+		 * guest VCPU after a SYSTEM_OFF request then the guest
+		 * VCPU should see an internal failure from the PSCI
+		 * return value. To achieve this, we preload r0 (or x0)
+		 * with the PSCI return value INTERNAL_FAILURE.
+		 */
+		val = PSCI_RET_INTERNAL_FAILURE;
+		ret = 0;
+		break;
+	case PSCI_0_2_FN_SYSTEM_RESET:
+		kvm_psci_system_reset(vcpu);
+		/*
+		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
+		 * with the PSCI return value INTERNAL_FAILURE.
+		 */
+		val = PSCI_RET_INTERNAL_FAILURE;
+		ret = 0;
+		break;
 	case PSCI_0_2_FN_CPU_SUSPEND:
 	case PSCI_0_2_FN_AFFINITY_INFO:
 	case PSCI_0_2_FN_MIGRATE:
 	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
 	case PSCI_0_2_FN_MIGRATE_INFO_UP_CPU:
-	case PSCI_0_2_FN_SYSTEM_OFF:
-	case PSCI_0_2_FN_SYSTEM_RESET:
 	case PSCI_0_2_FN64_CPU_SUSPEND:
 	case PSCI_0_2_FN64_AFFINITY_INFO:
 	case PSCI_0_2_FN64_MIGRATE:
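
The comment in the SYSTEM_OFF case is the key design point: the exit itself cannot force user space to stop the vCPU, so r0/x0 is preloaded with INTERNAL_FAILURE in case the vCPU is run again anyway. From the guest's side that looks roughly like the sketch below; it assumes an AArch64 guest using HVC as the PSCI conduit, the PSCI v0.2 SYSTEM_OFF function ID 0x84000008, and INTERNAL_FAILURE == -6, none of which are defined by this hunk.

/* Guest-side sketch (assumption: AArch64, HVC conduit, PSCI v0.2 IDs). */
#define PSCI_0_2_FN_SYSTEM_OFF		0x84000008
#define PSCI_RET_INTERNAL_FAILURE	(-6)

static long psci_call(unsigned long fn)
{
	register unsigned long x0 asm("x0") = fn;

	/* SYSTEM_OFF takes no arguments; x1-x3 may be clobbered. */
	asm volatile("hvc #0" : "+r" (x0) : : "x1", "x2", "x3", "memory");
	return x0;
}

static void guest_power_off_example(void)
{
	long ret = psci_call(PSCI_0_2_FN_SYSTEM_OFF);

	/*
	 * Normally this point is never reached: KVM exits to user space,
	 * which tears the VM down. If the VCPU is resumed anyway, this
	 * patch guarantees ret == PSCI_RET_INTERNAL_FAILURE (-6) in x0.
	 */
	(void)ret;
}
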
@@ -132,7 +172,7 @@ static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
 	}
 
 	*vcpu_reg(vcpu, 0) = val;
-	return 1;
+	return ret;
 }
 
 static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
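
Taken together with the earlier "int ret = 1;", the function now distinguishes ordinary PSCI calls (return 1, re-enter the guest with the result in r0/x0) from SYSTEM_OFF/SYSTEM_RESET (return 0, so KVM_RUN drops back to user space and the KVM_EXIT_SYSTEM_EVENT exit is actually seen). A simplified sketch of how a caller on the exit-handling path might consume that value follows; the real caller is not part of this diff, so the function name and structure here are assumptions rather than the kernel's exact code.

/*
 * Sketch only: on the arm/arm64 exit path a handler that returns a
 * value <= 0 makes KVM_RUN return to user space, while 1 means
 * "re-enter the guest", so forwarding the PSCI emulation's return
 * value is enough to surface KVM_EXIT_SYSTEM_EVENT.
 */
static int handle_psci_call(struct kvm_vcpu *vcpu)
{
	int ret = kvm_psci_0_2_call(vcpu);

	/* ret == 1: normal PSCI call, result already in r0/x0.    */
	/* ret == 0: SYSTEM_OFF/SYSTEM_RESET, exit to user space.  */
	return ret;
}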