@@ -529,6 +529,8 @@ static inline bool is_rm(void)
 
 unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_xirr(vcpu);
@@ -541,6 +543,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
 
 unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	vcpu->arch.gpr[5] = get_tb();
 	if (xive_enabled()) {
 		if (is_rm())
@@ -554,6 +558,8 @@ unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
 
 unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_ipoll(vcpu, server);
@@ -567,6 +573,8 @@ unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
 int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 		    unsigned long mfrr)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_ipi(vcpu, server, mfrr);
@@ -579,6 +587,8 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 
 int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_cppr(vcpu, cppr);
@@ -591,6 +601,8 @@ int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 
 int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 {
+	if (!kvmppc_xics_enabled(vcpu))
+		return H_TOO_HARD;
 	if (xive_enabled()) {
 		if (is_rm())
 			return xive_rm_h_eoi(vcpu, xirr);
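
Each hunk adds the same guard: if the vcpu has no in-kernel XICS (or XIVE) interrupt controller, the real-mode handler returns H_TOO_HARD before touching per-vcpu interrupt state that was never set up. On Book3S HV, H_TOO_HARD is the conventional signal by which a real-mode hypercall handler punts: the guest exit path then retries the hypercall in the virtual-mode handler, which is free to take locks, sleep, or pass the call out to userspace for emulation. The standalone sketch below models that two-level dispatch so the fallback is easy to follow; it is illustrative only, and every name in it (TOO_HARD, struct vcpu, xics_enabled, and the handler functions) is a stand-in rather than the kernel's actual API.

#include <stdbool.h>
#include <stdio.h>

#define TOO_HARD (-1L)	/* stand-in for H_TOO_HARD */

struct vcpu {
	bool xics_enabled;	/* models the kvmppc_xics_enabled() test */
};

/*
 * Fast path (models the real-mode handler): must not touch
 * interrupt-controller state that was never allocated, so it
 * bails out with the sentinel when no controller exists.
 */
static long rm_h_xirr(struct vcpu *vcpu)
{
	if (!vcpu->xics_enabled)
		return TOO_HARD;	/* defer to the slow path */
	return 0xa5;			/* pretend XIRR value */
}

/*
 * Slow path (models the virtual-mode handler): free to handle the
 * call by other means, e.g. handing it out to userspace emulation.
 * Here it just returns a distinct value so the flow is visible.
 */
static long virt_h_xirr(struct vcpu *vcpu)
{
	if (!vcpu->xics_enabled)
		return -2;	/* model: "handled elsewhere / unsupported" */
	return 0xa5;
}

/* Dispatch: try the fast path, fall back when it punts. */
static long do_h_xirr(struct vcpu *vcpu)
{
	long ret = rm_h_xirr(vcpu);

	if (ret == TOO_HARD)
		ret = virt_h_xirr(vcpu);
	return ret;
}

int main(void)
{
	struct vcpu v = { .xics_enabled = false };

	printf("without XICS: %ld\n", do_h_xirr(&v));	/* falls back: -2 */
	v.xics_enabled = true;
	printf("with XICS:    %ld\n", do_h_xirr(&v));	/* fast path: 165 */
	return 0;
}

The added test is cheap even on the real-mode fast path: if I read the tree of this era correctly, kvmppc_xics_enabled() is a one-line static inline that simply compares vcpu->arch.irq_type against the XICS interrupt type, so the guard costs a load and a branch while ensuring that guests issuing these hypercalls on a VM with no XICS/XIVE device fall back to virtual mode instead of having the real-mode handlers dereference unallocated state.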