|
@@ -87,6 +87,47 @@ static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
|
|
|
return true;
|
|
|
}
|
|
|
|
|
|
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
|
|
+/*
+ * kvmppc_copyto_vcpu_tm() - snapshot the vcpu's live register state into
+ * its TM-checkpointed copies (the *_tm fields).
+ *
+ * Copies all GPRs, the FP and VMX register files, and the SPRs that the
+ * Transactional Memory facility checkpoints: PPR, DSCR, AMR, CTR, TAR,
+ * LR, CR, XER and VRSAVE.
+ *
+ * NOTE(review): callers are not visible in this hunk; presumably invoked
+ * when emulating establishment of a TM checkpoint -- confirm at call sites.
+ */
+static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
|
|
|
+{
|
|
|
+ memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
|
|
|
+ sizeof(vcpu->arch.gpr_tm));
|
|
|
+ memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
|
|
|
+ sizeof(struct thread_fp_state));
|
|
|
+ memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
|
|
|
+ sizeof(struct thread_vr_state));
|
|
|
+ vcpu->arch.ppr_tm = vcpu->arch.ppr;
|
|
|
+ vcpu->arch.dscr_tm = vcpu->arch.dscr;
|
|
|
+ vcpu->arch.amr_tm = vcpu->arch.amr;
|
|
|
+ vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
|
|
|
+ vcpu->arch.tar_tm = vcpu->arch.tar;
|
|
|
+ vcpu->arch.lr_tm = vcpu->arch.regs.link;
|
|
|
+ vcpu->arch.cr_tm = vcpu->arch.cr;
|
|
|
+ vcpu->arch.xer_tm = vcpu->arch.regs.xer;
|
|
|
+ vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * kvmppc_copyfrom_vcpu_tm() - restore the vcpu's live register state from
+ * the TM-checkpointed copies (*_tm fields); the exact inverse of
+ * kvmppc_copyto_vcpu_tm().
+ *
+ * Copies back all GPRs, the FP and VMX register files, and the
+ * checkpointed SPRs: PPR, DSCR, AMR, CTR, TAR, LR, CR, XER and VRSAVE.
+ *
+ * NOTE(review): presumably invoked when emulating a rollback to the TM
+ * checkpoint -- confirm at call sites (not visible in this hunk).
+ */
+static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
|
|
|
+{
|
|
|
+ memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
|
|
|
+ sizeof(vcpu->arch.regs.gpr));
|
|
|
+ memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
|
|
|
+ sizeof(struct thread_fp_state));
|
|
|
+ memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
|
|
|
+ sizeof(struct thread_vr_state));
|
|
|
+ vcpu->arch.ppr = vcpu->arch.ppr_tm;
|
|
|
+ vcpu->arch.dscr = vcpu->arch.dscr_tm;
|
|
|
+ vcpu->arch.amr = vcpu->arch.amr_tm;
|
|
|
+ vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
|
|
|
+ vcpu->arch.tar = vcpu->arch.tar_tm;
|
|
|
+ vcpu->arch.regs.link = vcpu->arch.lr_tm;
|
|
|
+ vcpu->arch.cr = vcpu->arch.cr_tm;
|
|
|
+ vcpu->arch.regs.xer = vcpu->arch.xer_tm;
|
|
|
+ vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
|
|
|
+}
|
|
|
+
|
|
|
+#endif
|
|
|
+
|
|
|
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
|
|
unsigned int inst, int *advance)
|
|
|
{
|