@@ -170,6 +170,26 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 {
 	exit_handle_fn exit_handler;
 
+	if (ARM_SERROR_PENDING(exception_index)) {
+		u8 hsr_ec = ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
+
+		/*
+		 * HVC/SMC already have an adjusted PC, which we need
+		 * to correct in order to return to after having
+		 * injected the SError.
+		 */
+		if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
+		    hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+			u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
+			*vcpu_pc(vcpu) -= adj;
+		}
+
+		kvm_inject_vabt(vcpu);
+		return 1;
+	}
+
+	exception_index = ARM_EXCEPTION_CODE(exception_index);
+
 	switch (exception_index) {
 	case ARM_EXCEPTION_IRQ:
 		return 1;
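
For reference, ARM_SERROR_PENDING() and ARM_EXCEPTION_CODE() split the value returned by the world-switch code into a "pending SError" flag and the actual exit code. In the arm64 tree they live in arch/arm64/include/asm/kvm_asm.h, roughly as follows (paraphrased here, so check the tree you are actually working against):

	/* Bit 31 of the exit code flags a pending SError */
	#define ARM_EXIT_WITH_SERROR_BIT  31
	#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
	#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

The PC rewind exists because, as the comment in the hunk says, a trapped HVC/SMC leaves the guest PC already advanced past the instruction; to make the injected SError return to that instruction, the PC is stepped back by the instruction length, which kvm_vcpu_trap_il_is32bit(vcpu) reports as 32-bit (4 bytes) or 16-bit Thumb (2 bytes).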