@@ -4384,7 +4384,7 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
 					 addr, n, v))
 		    && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v))
 			break;
-		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, v);
 		handled += n;
 		addr += n;
 		len -= n;
@@ -4643,7 +4643,7 @@ static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
 {
 	if (vcpu->mmio_read_completed) {
 		trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
-			       vcpu->mmio_fragments[0].gpa, *(u64 *)val);
+			       vcpu->mmio_fragments[0].gpa, val);
 		vcpu->mmio_read_completed = 0;
 		return 1;
 	}
@@ -4665,14 +4665,14 @@ static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
 {
-	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
+	trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, val);
 	return vcpu_mmio_write(vcpu, gpa, bytes, val);
 }
 
 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  void *val, int bytes)
 {
-	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
+	trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, NULL);
 	return X86EMUL_IO_NEEDED;
 }