@@ -880,10 +880,10 @@ static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
 	if (offset == -1)
 		return;
 
-	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
-		val.vval = VCPU_VSX_VR(vcpu, index);
+	if (index >= 32) {
+		val.vval = VCPU_VSX_VR(vcpu, index - 32);
 		val.vsxval[offset] = gpr;
-		VCPU_VSX_VR(vcpu, index) = val.vval;
+		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
 		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
 	}
@@ -895,11 +895,11 @@ static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
 	union kvmppc_one_reg val;
 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 
-	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
-		val.vval = VCPU_VSX_VR(vcpu, index);
+	if (index >= 32) {
+		val.vval = VCPU_VSX_VR(vcpu, index - 32);
 		val.vsxval[0] = gpr;
 		val.vsxval[1] = gpr;
-		VCPU_VSX_VR(vcpu, index) = val.vval;
+		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
 		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
 		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
@@ -912,12 +912,12 @@ static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
 	union kvmppc_one_reg val;
 	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
 
-	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+	if (index >= 32) {
 		val.vsx32val[0] = gpr;
 		val.vsx32val[1] = gpr;
 		val.vsx32val[2] = gpr;
 		val.vsx32val[3] = gpr;
-		VCPU_VSX_VR(vcpu, index) = val.vval;
+		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
 		val.vsx32val[0] = gpr;
 		val.vsx32val[1] = gpr;
@@ -937,10 +937,10 @@ static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
 	if (offset == -1)
 		return;
 
-	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
-		val.vval = VCPU_VSX_VR(vcpu, index);
+	if (index >= 32) {
+		val.vval = VCPU_VSX_VR(vcpu, index - 32);
 		val.vsx32val[offset] = gpr32;
-		VCPU_VSX_VR(vcpu, index) = val.vval;
+		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
 	} else {
 		dword_offset = offset / 2;
 		word_offset = offset % 2;
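
The four hunks above make the same substitution in each MMIO store-to-register helper: instead of consulting the separate vcpu->arch.mmio_vsx_tx_sx_enabled flag, the target register file is derived from the register index itself, with 0-31 selecting the FPR/VSR file and 32-63 selecting the VMX/VR file (this only works if KVM_MMIO_REG_MASK keeps at least six low bits, which the rest of the patch presumably ensures). A minimal sketch of that convention, assuming the decode side now encodes VR targets as the VR number plus 32; the helper names below are hypothetical, not part of the patch:

static inline bool vsx_index_is_vr(int index)
{
	/* assumed encoding: 0-31 -> FPR/VSR file, 32-63 -> VMX/VR file */
	return index >= 32;
}

static inline int vsx_index_to_vr(int index)
{
	/* map an encoded index back to a 0-31 VR register number */
	return index - 32;
}

Under this scheme a load that targets VR2 arrives with index == 34, takes the first branch, and is written via VCPU_VSX_VR(vcpu, 2); an index of 2 still lands in VCPU_VSX_FPR(vcpu, 2, offset) as before.
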
@@ -1361,10 +1361,10 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 			break;
 		}
 
-		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+		if (rs < 32) {
 			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
 		} else {
-			reg.vval = VCPU_VSX_VR(vcpu, rs);
+			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
 			*val = reg.vsxval[vsx_offset];
 		}
 		break;
@@ -1378,13 +1378,13 @@ static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
 			break;
 		}
 
-		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+		if (rs < 32) {
 			dword_offset = vsx_offset / 2;
 			word_offset = vsx_offset % 2;
 			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
 			*val = reg.vsx32val[word_offset];
 		} else {
-			reg.vval = VCPU_VSX_VR(vcpu, rs);
+			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
 			*val = reg.vsx32val[vsx_offset];
 		}
 		break;
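
The store source path in kvmppc_get_vsr_data applies the same rule in reverse: rs below 32 reads the FPR/VSR file directly, while rs of 32 and up reads VCPU_VSX_VR(vcpu, rs - 32). The apparent motivation, hedging since the changelog is not quoted here, is that the register number alone now carries the TX/SX information, so no per-instruction flag has to stay valid across the exit-to-userspace round trip of an MMIO emulation. A hypothetical encode-side counterpart, for illustration only:

static inline int vsx_encode_reg(int reg, bool is_vr)
{
	/* hypothetical: fold the old TX/SX bit into the register number */
	return is_vr ? reg + 32 : reg;
}

For example, vsx_encode_reg(5, true) yields 37, and kvmppc_get_vsr_data(vcpu, 37, &data) would then pull the requested doubleword or word out of VCPU_VSX_VR(vcpu, 5).
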