@@ -388,14 +388,13 @@ void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu)
 #define per_write_wp_event(code) \
 	(code & (PER_CODE_STORE | PER_CODE_STORE_REAL))
 
-static int debug_exit_required(struct kvm_vcpu *vcpu)
+static int debug_exit_required(struct kvm_vcpu *vcpu, u8 perc,
+			       unsigned long peraddr)
 {
-	u8 perc = vcpu->arch.sie_block->perc;
 	struct kvm_debug_exit_arch *debug_exit = &vcpu->run->debug.arch;
 	struct kvm_hw_wp_info_arch *wp_info = NULL;
 	struct kvm_hw_bp_info_arch *bp_info = NULL;
 	unsigned long addr = vcpu->arch.sie_block->gpsw.addr;
-	unsigned long peraddr = vcpu->arch.sie_block->peraddr;
 
 	if (guestdbg_hw_bp_enabled(vcpu)) {
 		if (per_write_wp_event(perc) &&
@@ -437,36 +436,118 @@ exit_required:
 	return 1;
 }
 
+static int per_fetched_addr(struct kvm_vcpu *vcpu, unsigned long *addr)
+{
+	u8 exec_ilen = 0;
+	u16 opcode[3];
+	int rc;
+
+	if (vcpu->arch.sie_block->icptcode == ICPT_PROGI) {
+		/* PER address references the fetched or the execute instr */
+		*addr = vcpu->arch.sie_block->peraddr;
+		/*
+		 * Manually detect if we have an EXECUTE instruction. As
+		 * instructions are always 2 byte aligned we can read the
+		 * first two bytes unconditionally
+		 */
+		rc = read_guest_instr(vcpu, *addr, &opcode, 2);
+		if (rc)
+			return rc;
+		if (opcode[0] >> 8 == 0x44)
+			exec_ilen = 4;
+		if ((opcode[0] & 0xff0f) == 0xc600)
+			exec_ilen = 6;
+	} else {
+		/* instr was suppressed, calculate the responsible instr */
+		*addr = __rewind_psw(vcpu->arch.sie_block->gpsw,
+				     kvm_s390_get_ilen(vcpu));
+		if (vcpu->arch.sie_block->icptstatus & 0x01) {
+			exec_ilen = (vcpu->arch.sie_block->icptstatus & 0x60) >> 4;
+			if (!exec_ilen)
+				exec_ilen = 4;
+		}
+	}
+
+	if (exec_ilen) {
+		/* read the complete EXECUTE instr to detect the fetched addr */
+		rc = read_guest_instr(vcpu, *addr, &opcode, exec_ilen);
+		if (rc)
+			return rc;
+		if (exec_ilen == 6) {
+			/* EXECUTE RELATIVE LONG - RIL-b format */
+			s32 rl = *((s32 *) (opcode + 1));
+
+			/* rl is a _signed_ 32 bit value specifying halfwords */
+			*addr += (u64)(s64) rl * 2;
+		} else {
+			/* EXECUTE - RX-a format */
+			u32 base = (opcode[1] & 0xf000) >> 12;
+			u32 disp = opcode[1] & 0x0fff;
+			u32 index = opcode[0] & 0x000f;
+
+			*addr = base ? vcpu->run->s.regs.gprs[base] : 0;
+			*addr += index ? vcpu->run->s.regs.gprs[index] : 0;
+			*addr += disp;
+		}
+		*addr = kvm_s390_logical_to_effective(vcpu, *addr);
+	}
+	return 0;
+}
+
 #define guest_per_enabled(vcpu) \
 	(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER)
 
 int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu)
 {
+	const u64 cr10 = vcpu->arch.sie_block->gcr[10];
+	const u64 cr11 = vcpu->arch.sie_block->gcr[11];
 	const u8 ilen = kvm_s390_get_ilen(vcpu);
 	struct kvm_s390_pgm_info pgm_info = {
 		.code = PGM_PER,
 		.per_code = PER_CODE_IFETCH,
 		.per_address = __rewind_psw(vcpu->arch.sie_block->gpsw, ilen),
 	};
+	unsigned long fetched_addr;
+	int rc;
 
 	/*
 	 * The PSW points to the next instruction, therefore the intercepted
 	 * instruction generated a PER i-fetch event. PER address therefore
 	 * points at the previous PSW address (could be an EXECUTE function).
 	 */
-	return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+	if (!guestdbg_enabled(vcpu))
+		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+
+	if (debug_exit_required(vcpu, pgm_info.per_code, pgm_info.per_address))
+		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
+
+	if (!guest_per_enabled(vcpu) ||
+	    !(vcpu->arch.sie_block->gcr[9] & PER_EVENT_IFETCH))
+		return 0;
+
+	rc = per_fetched_addr(vcpu, &fetched_addr);
+	if (rc < 0)
+		return rc;
+	if (rc)
+		/* instruction-fetching exceptions */
+		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+
+	if (in_addr_range(fetched_addr, cr10, cr11))
+		return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
+	return 0;
 }
 
-static void filter_guest_per_event(struct kvm_vcpu *vcpu)
+static int filter_guest_per_event(struct kvm_vcpu *vcpu)
 {
 	const u8 perc = vcpu->arch.sie_block->perc;
-	u64 peraddr = vcpu->arch.sie_block->peraddr;
 	u64 addr = vcpu->arch.sie_block->gpsw.addr;
 	u64 cr9 = vcpu->arch.sie_block->gcr[9];
 	u64 cr10 = vcpu->arch.sie_block->gcr[10];
 	u64 cr11 = vcpu->arch.sie_block->gcr[11];
 	/* filter all events, demanded by the guest */
 	u8 guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK;
+	unsigned long fetched_addr;
+	int rc;
 
 	if (!guest_per_enabled(vcpu))
 		guest_perc = 0;
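The EXECUTE handling in per_fetched_addr() above is compact; the following standalone userspace sketch reproduces the same target-address arithmetic for EX (RX-a) and EXRL (RIL-b). decode_exec_target() and its inputs are hypothetical stand-ins for vcpu state, and the RIL-b offset is assembled with explicit shifts instead of the kernel's direct big-endian s32 load.

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the target-address decode performed by per_fetched_addr()
 * above, outside the kernel: decode_exec_target() is a hypothetical
 * name, and the guest registers/instruction bytes come from plain
 * arrays instead of vcpu state. Returns 1 if insn is EX/EXRL and
 * stores the target address in *target, 0 otherwise.
 */
static int decode_exec_target(const uint16_t insn[3], uint64_t insn_addr,
			      const uint64_t gprs[16], uint64_t *target)
{
	if (insn[0] >> 8 == 0x44) {
		/* EXECUTE, RX-a: op(8) R1(4) X2(4) / B2(4) D2(12) */
		uint32_t index = insn[0] & 0x000f;
		uint32_t base = (insn[1] & 0xf000) >> 12;
		uint32_t disp = insn[1] & 0x0fff;

		/* GPR 0 means "no base/index register", not r0's value */
		*target = (base ? gprs[base] : 0) +
			  (index ? gprs[index] : 0) + disp;
		return 1;
	}
	if ((insn[0] & 0xff0f) == 0xc600) {
		/* EXECUTE RELATIVE LONG, RIL-b: signed 32-bit halfword
		 * offset, relative to the EXRL instruction itself */
		int32_t rl = (int32_t)(((uint32_t)insn[1] << 16) | insn[2]);

		*target = insn_addr + (int64_t)rl * 2;
		return 1;
	}
	return 0;
}

int main(void)
{
	uint64_t gprs[16] = { [5] = 0x30, [7] = 0x2000 };
	uint16_t ex[3]   = { 0x4415, 0x7100, 0x0000 }; /* EX 1,0x100(5,7)  */
	uint16_t exrl[3] = { 0xc610, 0xffff, 0xfffc }; /* EXRL 1,.-8 bytes */
	uint64_t target;

	if (decode_exec_target(ex, 0x1000, gprs, &target))
		printf("EX   target: 0x%llx\n", (unsigned long long)target); /* 0x2130 */
	if (decode_exec_target(exrl, 0x1000, gprs, &target))
		printf("EXRL target: 0x%llx\n", (unsigned long long)target); /* 0xff8 */
	return 0;
}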
@@ -478,9 +559,17 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 		guest_perc &= ~PER_CODE_BRANCH;
 
 	/* filter "instruction-fetching" events */
-	if (guest_perc & PER_CODE_IFETCH &&
-	    !in_addr_range(peraddr, cr10, cr11))
-		guest_perc &= ~PER_CODE_IFETCH;
+	if (guest_perc & PER_CODE_IFETCH) {
+		rc = per_fetched_addr(vcpu, &fetched_addr);
+		if (rc < 0)
+			return rc;
+		/*
+		 * Don't inject an irq on exceptions. This would make handling
+		 * on icpt code 8 very complex (as PSW was already rewound).
+		 */
+		if (rc || !in_addr_range(fetched_addr, cr10, cr11))
+			guest_perc &= ~PER_CODE_IFETCH;
+	}
 
 	/* All other PER events will be given to the guest */
 	/* TODO: Check altered address/address space */
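The filter above range-checks the fetched address against CR10/CR11 through the file's existing in_addr_range() helper. PER ranges may wrap: a start address above the end address denotes an interval crossing the top of the address space. A minimal sketch, with the helper reimplemented here for illustration (the kernel version lives in guestdbg.c):

#include <stdint.h>
#include <stdio.h>

/* PER range check in the spirit of guestdbg.c's in_addr_range(): when
 * the start address a is larger than the end address b, the range
 * wraps around the top of the address space. */
static int in_addr_range(uint64_t addr, uint64_t a, uint64_t b)
{
	if (a <= b)
		return addr >= a && addr <= b;
	/* "overflowing" interval: [a, max] plus [0, b] */
	return addr >= a || addr <= b;
}

int main(void)
{
	/* ordinary range: CR10 = 0x1000 .. CR11 = 0x2000 */
	printf("%d\n", in_addr_range(0x1800, 0x1000, 0x2000));			/* 1 */
	/* wrapping range: CR10 = 0xffffffffffff0000 .. CR11 = 0x1000 */
	printf("%d\n", in_addr_range(0x800, 0xffffffffffff0000ULL, 0x1000));	/* 1 */
	printf("%d\n", in_addr_range(0x2000, 0xffffffffffff0000ULL, 0x1000));	/* 0 */
	return 0;
}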
@@ -489,6 +578,7 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 
 	if (!guest_perc)
 		vcpu->arch.sie_block->iprcc &= ~PGM_PER;
+	return 0;
 }
 
 #define pssec(vcpu)	(vcpu->arch.sie_block->gcr[1] & _ASCE_SPACE_SWITCH)
@@ -496,14 +586,17 @@ static void filter_guest_per_event(struct kvm_vcpu *vcpu)
 #define old_ssec(vcpu)	((vcpu->arch.sie_block->tecmc >> 31) & 0x1)
 #define old_as_is_home(vcpu) !(vcpu->arch.sie_block->tecmc & 0xffff)
 
-void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
+int kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 {
-	int new_as;
+	int rc, new_as;
 
-	if (debug_exit_required(vcpu))
+	if (debug_exit_required(vcpu, vcpu->arch.sie_block->perc,
+				vcpu->arch.sie_block->peraddr))
 		vcpu->guest_debug |= KVM_GUESTDBG_EXIT_PENDING;
 
-	filter_guest_per_event(vcpu);
+	rc = filter_guest_per_event(vcpu);
+	if (rc)
+		return rc;
 
 	/*
 	 * Only RP, SAC, SACF, PT, PTI, PR, PC instructions can trigger
@@ -532,4 +625,5 @@ void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu)
 			(pssec(vcpu) || old_ssec(vcpu)))
 		vcpu->arch.sie_block->iprcc = PGM_SPACE_SWITCH;
 	}
+	return 0;
 }
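One detail worth spelling out from filter_guest_per_event()'s context lines: guest_perc = perc & (cr9 >> 24) & PER_CODE_MASK works because the CR9 PER-event mask bits and the PER-code bits reported in perc occupy matching positions 24 bits apart; the additional PER_CODE_MASK in the real code confines the result to valid PER-code bits. A small sketch; the constant values are transcribed by hand from the s390 headers, so treat them as assumptions:

#include <stdint.h>
#include <stdio.h>

/*
 * Constants as I recall them from the s390 headers at the time of
 * this patch (verify against the tree before relying on them). CR9
 * carries the PER event mask in the high bits of its low word; the
 * PER code uses the same bit layout, which is why ">> 24" lines the
 * two up.
 */
#define PER_EVENT_BRANCH	0x80000000UL
#define PER_EVENT_IFETCH	0x40000000UL
#define PER_CODE_BRANCH		0x80
#define PER_CODE_IFETCH		0x40

int main(void)
{
	uint64_t cr9 = PER_EVENT_IFETCH;		  /* guest asked for i-fetch only */
	uint8_t perc = PER_CODE_BRANCH | PER_CODE_IFETCH; /* hardware reported both      */
	uint8_t guest_perc = perc & (cr9 >> 24);

	/* only the i-fetch event remains to be forwarded to the guest */
	printf("guest_perc = %#x\n", guest_perc);	  /* 0x40 */
	return 0;
}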