@@ -58,6 +58,9 @@
 
 #include "book3s.h"
 
+#define CREATE_TRACE_POINTS
+#include "trace_hv.h"
+
 /* #define EXIT_DEBUG */
 /* #define EXIT_DEBUG_SIMPLE */
 /* #define EXIT_DEBUG_INT */
@@ -135,11 +138,10 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
  * stolen.
  *
  * Updates to busy_stolen are protected by arch.tbacct_lock;
- * updates to vc->stolen_tb are protected by the arch.tbacct_lock
- * of the vcpu that has taken responsibility for running the vcore
- * (i.e. vc->runner). The stolen times are measured in units of
- * timebase ticks. (Note that the != TB_NIL checks below are
- * purely defensive; they should never fail.)
+ * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
+ * lock. The stolen times are measured in units of timebase ticks.
+ * (Note that the != TB_NIL checks below are purely defensive;
+ * they should never fail.)
  */
 
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
@@ -147,12 +149,21 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	unsigned long flags;
 
-	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
-	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
-	    vc->preempt_tb != TB_NIL) {
-		vc->stolen_tb += mftb() - vc->preempt_tb;
-		vc->preempt_tb = TB_NIL;
+	/*
+	 * We can test vc->runner without taking the vcore lock,
+	 * because only this task ever sets vc->runner to this
+	 * vcpu, and once it is set to this vcpu, only this task
+	 * ever sets it to NULL.
+	 */
+	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
+		spin_lock_irqsave(&vc->stoltb_lock, flags);
+		if (vc->preempt_tb != TB_NIL) {
+			vc->stolen_tb += mftb() - vc->preempt_tb;
+			vc->preempt_tb = TB_NIL;
+		}
+		spin_unlock_irqrestore(&vc->stoltb_lock, flags);
 	}
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
 	    vcpu->arch.busy_preempt != TB_NIL) {
 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
@@ -166,9 +177,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	unsigned long flags;
 
-	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
-	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
+	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
+		spin_lock_irqsave(&vc->stoltb_lock, flags);
 		vc->preempt_tb = mftb();
+		spin_unlock_irqrestore(&vc->stoltb_lock, flags);
+	}
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 		vcpu->arch.busy_preempt = mftb();
 	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
@@ -191,9 +205,6 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 
 	if (arch_compat) {
-		if (!cpu_has_feature(CPU_FTR_ARCH_206))
-			return -EINVAL;	/* 970 has no compat mode support */
-
 		switch (arch_compat) {
 		case PVR_ARCH_205:
 			/*
@@ -505,25 +516,14 @@ static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
 static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 {
 	u64 p;
+	unsigned long flags;
 
-	/*
-	 * If we are the task running the vcore, then since we hold
-	 * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
-	 * can't be updated, so we don't need the tbacct_lock.
-	 * If the vcore is inactive, it can't become active (since we
-	 * hold the vcore lock), so the vcpu load/put functions won't
-	 * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
-	 */
+	spin_lock_irqsave(&vc->stoltb_lock, flags);
+	p = vc->stolen_tb;
 	if (vc->vcore_state != VCORE_INACTIVE &&
-	    vc->runner->arch.run_task != current) {
-		spin_lock_irq(&vc->runner->arch.tbacct_lock);
-		p = vc->stolen_tb;
-		if (vc->preempt_tb != TB_NIL)
-			p += now - vc->preempt_tb;
-		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
-	} else {
-		p = vc->stolen_tb;
-	}
+	    vc->preempt_tb != TB_NIL)
+		p += now - vc->preempt_tb;
+	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
 	return p;
 }
 
@@ -607,10 +607,45 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
 	}
 }
 
+static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
+{
+	struct kvmppc_vcore *vcore = target->arch.vcore;
+
+	/*
+	 * We expect to have been called by the real mode handler
+	 * (kvmppc_rm_h_confer()) which would have directly returned
+	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
+	 * have useful work to do and should not confer) so we don't
+	 * recheck that here.
+	 */
+
+	spin_lock(&vcore->lock);
+	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
+	    vcore->vcore_state != VCORE_INACTIVE)
+		target = vcore->runner;
+	spin_unlock(&vcore->lock);
+
+	return kvm_vcpu_yield_to(target);
+}
+
+static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
+{
+	int yield_count = 0;
+	struct lppaca *lppaca;
+
+	spin_lock(&vcpu->arch.vpa_update_lock);
+	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
+	if (lppaca)
+		yield_count = lppaca->yield_count;
+	spin_unlock(&vcpu->arch.vpa_update_lock);
+	return yield_count;
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
 	unsigned long req = kvmppc_get_gpr(vcpu, 3);
 	unsigned long target, ret = H_SUCCESS;
+	int yield_count;
 	struct kvm_vcpu *tvcpu;
 	int idx, rc;
 
@@ -619,14 +654,6 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		return RESUME_HOST;
 
 	switch (req) {
-	case H_ENTER:
-		idx = srcu_read_lock(&vcpu->kvm->srcu);
-		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
-					      kvmppc_get_gpr(vcpu, 5),
-					      kvmppc_get_gpr(vcpu, 6),
-					      kvmppc_get_gpr(vcpu, 7));
-		srcu_read_unlock(&vcpu->kvm->srcu, idx);
-		break;
 	case H_CEDE:
 		break;
 	case H_PROD:
@@ -654,7 +681,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 			ret = H_PARAMETER;
 			break;
 		}
-		kvm_vcpu_yield_to(tvcpu);
+		yield_count = kvmppc_get_gpr(vcpu, 5);
+		if (kvmppc_get_yield_count(tvcpu) != yield_count)
+			break;
+		kvm_arch_vcpu_yield_to(tvcpu);
 		break;
 	case H_REGISTER_VPA:
 		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
@@ -769,6 +799,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		vcpu->stat.ext_intr_exits++;
 		r = RESUME_GUEST;
 		break;
+	/* HMI is hypervisor interrupt and host has handled it. Resume guest.*/
+	case BOOK3S_INTERRUPT_HMI:
 	case BOOK3S_INTERRUPT_PERFMON:
 		r = RESUME_GUEST;
 		break;
@@ -837,6 +869,10 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	 * Accordingly return to Guest or Host.
 	 */
 	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
+		if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
+			vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
+				swab32(vcpu->arch.emul_inst) :
+				vcpu->arch.emul_inst;
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
 			r = kvmppc_emulate_debug_inst(run, vcpu);
 		} else {
@@ -1357,6 +1393,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
 
 	INIT_LIST_HEAD(&vcore->runnable_threads);
 	spin_lock_init(&vcore->lock);
+	spin_lock_init(&vcore->stoltb_lock);
 	init_waitqueue_head(&vcore->wq);
 	vcore->preempt_tb = TB_NIL;
 	vcore->lpcr = kvm->arch.lpcr;
@@ -1694,9 +1731,11 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	vc->n_woken = 0;
 	vc->nap_count = 0;
 	vc->entry_exit_count = 0;
+	vc->preempt_tb = TB_NIL;
 	vc->vcore_state = VCORE_STARTING;
 	vc->in_guest = 0;
 	vc->napping_threads = 0;
+	vc->conferring_threads = 0;
 
 	/*
 	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
@@ -1726,6 +1765,7 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
 		kvmppc_start_thread(vcpu);
 		kvmppc_create_dtl_entry(vcpu, vc);
+		trace_kvm_guest_enter(vcpu);
 	}
 
 	/* Set this explicitly in case thread 0 doesn't have a vcpu */
@@ -1734,6 +1774,9 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 
 	vc->vcore_state = VCORE_RUNNING;
 	preempt_disable();
+
+	trace_kvmppc_run_core(vc, 0);
+
 	spin_unlock(&vc->lock);
 
 	kvm_guest_enter();
@@ -1779,6 +1822,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 		    kvmppc_core_pending_dec(vcpu))
 			kvmppc_core_dequeue_dec(vcpu);
 
+		trace_kvm_guest_exit(vcpu);
+
 		ret = RESUME_GUEST;
 		if (vcpu->arch.trap)
 			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
@@ -1804,6 +1849,8 @@ static void kvmppc_run_core(struct kvmppc_vcore *vc)
 			wake_up(&vcpu->arch.cpu_run);
 		}
 	}
+
+	trace_kvmppc_run_core(vc, 1);
 }
 
 /*
@@ -1826,15 +1873,37 @@ static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
  */
 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 {
+	struct kvm_vcpu *vcpu;
+	int do_sleep = 1;
+
 	DEFINE_WAIT(wait);
 
 	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+
+	/*
+	 * Check one last time for pending exceptions and ceded state after
+	 * we put ourselves on the wait queue
+	 */
+	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
+			do_sleep = 0;
+			break;
+		}
+	}
+
+	if (!do_sleep) {
+		finish_wait(&vc->wq, &wait);
+		return;
+	}
+
 	vc->vcore_state = VCORE_SLEEPING;
+	trace_kvmppc_vcore_blocked(vc, 0);
 	spin_unlock(&vc->lock);
 	schedule();
 	finish_wait(&vc->wq, &wait);
 	spin_lock(&vc->lock);
 	vc->vcore_state = VCORE_INACTIVE;
+	trace_kvmppc_vcore_blocked(vc, 1);
 }
 
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
@@ -1843,6 +1912,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	struct kvmppc_vcore *vc;
 	struct kvm_vcpu *v, *vn;
 
+	trace_kvmppc_run_vcpu_enter(vcpu);
+
 	kvm_run->exit_reason = 0;
 	vcpu->arch.ret = RESUME_GUEST;
 	vcpu->arch.trap = 0;
@@ -1872,6 +1943,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		    VCORE_EXIT_COUNT(vc) == 0) {
 			kvmppc_create_dtl_entry(vcpu, vc);
 			kvmppc_start_thread(vcpu);
+			trace_kvm_guest_enter(vcpu);
 		} else if (vc->vcore_state == VCORE_SLEEPING) {
 			wake_up(&vc->wq);
 		}
@@ -1936,6 +2008,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 		wake_up(&v->arch.cpu_run);
 	}
 
+	trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
 	spin_unlock(&vc->lock);
 	return vcpu->arch.ret;
 }
@@ -1962,7 +2035,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
 	smp_mb();
 
-	/* On the first time here, set up HTAB and VRMA or RMA */
+	/* On the first time here, set up HTAB and VRMA */
 	if (!vcpu->kvm->arch.rma_setup_done) {
 		r = kvmppc_hv_setup_htab_rma(vcpu);
 		if (r)
@@ -1981,7 +2054,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
 		    !(vcpu->arch.shregs.msr & MSR_PR)) {
+			trace_kvm_hcall_enter(vcpu);
 			r = kvmppc_pseries_do_hcall(vcpu);
+			trace_kvm_hcall_exit(vcpu, r);
 			kvmppc_core_prepare_to_enter(vcpu);
 		} else if (r == RESUME_PAGE_FAULT) {
 			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -1997,98 +2072,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	return r;
 }
 
-
-/* Work out RMLS (real mode limit selector) field value for a given RMA size.
-   Assumes POWER7 or PPC970. */
-static inline int lpcr_rmls(unsigned long rma_size)
-{
-	switch (rma_size) {
-	case 32ul << 20:	/* 32 MB */
-		if (cpu_has_feature(CPU_FTR_ARCH_206))
-			return 8;	/* only supported on POWER7 */
-		return -1;
-	case 64ul << 20:	/* 64 MB */
-		return 3;
-	case 128ul << 20:	/* 128 MB */
-		return 7;
-	case 256ul << 20:	/* 256 MB */
-		return 4;
-	case 1ul << 30:		/* 1 GB */
-		return 2;
-	case 16ul << 30:	/* 16 GB */
-		return 1;
-	case 256ul << 30:	/* 256 GB */
-		return 0;
-	default:
-		return -1;
-	}
-}
-
-static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	struct page *page;
-	struct kvm_rma_info *ri = vma->vm_file->private_data;
-
-	if (vmf->pgoff >= kvm_rma_pages)
-		return VM_FAULT_SIGBUS;
-
-	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
-	get_page(page);
-	vmf->page = page;
-	return 0;
-}
-
-static const struct vm_operations_struct kvm_rma_vm_ops = {
-	.fault = kvm_rma_fault,
-};
-
-static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
-{
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
-	vma->vm_ops = &kvm_rma_vm_ops;
-	return 0;
-}
-
-static int kvm_rma_release(struct inode *inode, struct file *filp)
-{
-	struct kvm_rma_info *ri = filp->private_data;
-
-	kvm_release_rma(ri);
-	return 0;
-}
-
-static const struct file_operations kvm_rma_fops = {
-	.mmap = kvm_rma_mmap,
-	.release = kvm_rma_release,
-};
-
-static long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
-				      struct kvm_allocate_rma *ret)
-{
-	long fd;
-	struct kvm_rma_info *ri;
-	/*
-	 * Only do this on PPC970 in HV mode
-	 */
-	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
-	    !cpu_has_feature(CPU_FTR_ARCH_201))
-		return -EINVAL;
-
-	if (!kvm_rma_pages)
-		return -EINVAL;
-
-	ri = kvm_alloc_rma();
-	if (!ri)
-		return -ENOMEM;
-
-	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
-	if (fd < 0)
-		kvm_release_rma(ri);
-
-	ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
-	return fd;
-}
-
 static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
 				     int linux_psize)
 {
@@ -2167,26 +2150,6 @@ out:
 	return r;
 }
 
-static void unpin_slot(struct kvm_memory_slot *memslot)
-{
-	unsigned long *physp;
-	unsigned long j, npages, pfn;
-	struct page *page;
-
-	physp = memslot->arch.slot_phys;
-	npages = memslot->npages;
-	if (!physp)
-		return;
-	for (j = 0; j < npages; j++) {
-		if (!(physp[j] & KVMPPC_GOT_PAGE))
-			continue;
-		pfn = physp[j] >> PAGE_SHIFT;
-		page = pfn_to_page(pfn);
-		SetPageDirty(page);
-		put_page(page);
-	}
-}
-
 static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
 					struct kvm_memory_slot *dont)
 {
@@ -2194,11 +2157,6 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
 		vfree(free->arch.rmap);
 		free->arch.rmap = NULL;
 	}
-	if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
-		unpin_slot(free);
-		vfree(free->arch.slot_phys);
-		free->arch.slot_phys = NULL;
-	}
 }
 
 static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
@@ -2207,7 +2165,6 @@ static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
 	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
 	if (!slot->arch.rmap)
 		return -ENOMEM;
-	slot->arch.slot_phys = NULL;
 
 	return 0;
 }
@@ -2216,17 +2173,6 @@ static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
 					struct kvm_memory_slot *memslot,
 					struct kvm_userspace_memory_region *mem)
 {
-	unsigned long *phys;
-
-	/* Allocate a slot_phys array if needed */
-	phys = memslot->arch.slot_phys;
-	if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
-		phys = vzalloc(memslot->npages * sizeof(unsigned long));
-		if (!phys)
-			return -ENOMEM;
-		memslot->arch.slot_phys = phys;
-	}
-
 	return 0;
 }
 
@@ -2284,17 +2230,11 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 {
 	int err = 0;
 	struct kvm *kvm = vcpu->kvm;
-	struct kvm_rma_info *ri = NULL;
 	unsigned long hva;
 	struct kvm_memory_slot *memslot;
 	struct vm_area_struct *vma;
 	unsigned long lpcr = 0, senc;
-	unsigned long lpcr_mask = 0;
 	unsigned long psize, porder;
-	unsigned long rma_size;
-	unsigned long rmls;
-	unsigned long *physp;
-	unsigned long i, npages;
 	int srcu_idx;
 
 	mutex_lock(&kvm->lock);
@@ -2329,88 +2269,25 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
 	psize = vma_kernel_pagesize(vma);
 	porder = __ilog2(psize);
 
-	/* Is this one of our preallocated RMAs? */
-	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
-	    hva == vma->vm_start)
-		ri = vma->vm_file->private_data;
-
 	up_read(&current->mm->mmap_sem);
 
-	if (!ri) {
-		/* On POWER7, use VRMA; on PPC970, give up */
-		err = -EPERM;
-		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
-			pr_err("KVM: CPU requires an RMO\n");
-			goto out_srcu;
-		}
+	/* We can handle 4k, 64k or 16M pages in the VRMA */
+	err = -EINVAL;
+	if (!(psize == 0x1000 || psize == 0x10000 ||
+	      psize == 0x1000000))
+		goto out_srcu;
 
-		/* We can handle 4k, 64k or 16M pages in the VRMA */
-		err = -EINVAL;
-		if (!(psize == 0x1000 || psize == 0x10000 ||
-		      psize == 0x1000000))
-			goto out_srcu;
+	/* Update VRMASD field in the LPCR */
+	senc = slb_pgsize_encoding(psize);
+	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
+		(VRMA_VSID << SLB_VSID_SHIFT_1T);
+	/* the -4 is to account for senc values starting at 0x10 */
+	lpcr = senc << (LPCR_VRMASD_SH - 4);
 
-		/* Update VRMASD field in the LPCR */
-		senc = slb_pgsize_encoding(psize);
-		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
-			(VRMA_VSID << SLB_VSID_SHIFT_1T);
-		lpcr_mask = LPCR_VRMASD;
-		/* the -4 is to account for senc values starting at 0x10 */
-		lpcr = senc << (LPCR_VRMASD_SH - 4);
+	/* Create HPTEs in the hash page table for the VRMA */
+	kvmppc_map_vrma(vcpu, memslot, porder);
 
-		/* Create HPTEs in the hash page table for the VRMA */
-		kvmppc_map_vrma(vcpu, memslot, porder);
-
-	} else {
-		/* Set up to use an RMO region */
-		rma_size = kvm_rma_pages;
-		if (rma_size > memslot->npages)
-			rma_size = memslot->npages;
-		rma_size <<= PAGE_SHIFT;
-		rmls = lpcr_rmls(rma_size);
-		err = -EINVAL;
-		if ((long)rmls < 0) {
-			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
-			goto out_srcu;
-		}
-		atomic_inc(&ri->use_count);
-		kvm->arch.rma = ri;
-
-		/* Update LPCR and RMOR */
-		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
-			/* PPC970; insert RMLS value (split field) in HID4 */
-			lpcr_mask = (1ul << HID4_RMLS0_SH) |
-				(3ul << HID4_RMLS2_SH) | HID4_RMOR;
-			lpcr = ((rmls >> 2) << HID4_RMLS0_SH) |
-				((rmls & 3) << HID4_RMLS2_SH);
-			/* RMOR is also in HID4 */
-			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
-				<< HID4_RMOR_SH;
-		} else {
-			/* POWER7 */
-			lpcr_mask = LPCR_VPM0 | LPCR_VRMA_L | LPCR_RMLS;
-			lpcr = rmls << LPCR_RMLS_SH;
-			kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
-		}
-		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
-			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
-
-		/* Initialize phys addrs of pages in RMO */
-		npages = kvm_rma_pages;
-		porder = __ilog2(npages);
-		physp = memslot->arch.slot_phys;
-		if (physp) {
-			if (npages > memslot->npages)
-				npages = memslot->npages;
-			spin_lock(&kvm->arch.slot_phys_lock);
-			for (i = 0; i < npages; ++i)
-				physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
-					porder;
-			spin_unlock(&kvm->arch.slot_phys_lock);
-		}
-	}
-
-	kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+	kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
 
 	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
 	smp_wmb();
@@ -2449,35 +2326,21 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
 	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
 	       sizeof(kvm->arch.enabled_hcalls));
 
-	kvm->arch.rma = NULL;
-
 	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
 
-	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
-		/* PPC970; HID4 is effectively the LPCR */
-		kvm->arch.host_lpid = 0;
-		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
-		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
-		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
-			((lpid & 0xf) << HID4_LPID5_SH);
-	} else {
-		/* POWER7; init LPCR for virtual RMA mode */
-		kvm->arch.host_lpid = mfspr(SPRN_LPID);
-		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
-		lpcr &= LPCR_PECE | LPCR_LPES;
-		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
-			LPCR_VPM0 | LPCR_VPM1;
-		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
-			(VRMA_VSID << SLB_VSID_SHIFT_1T);
-		/* On POWER8 turn on online bit to enable PURR/SPURR */
-		if (cpu_has_feature(CPU_FTR_ARCH_207S))
-			lpcr |= LPCR_ONL;
-	}
+	/* Init LPCR for virtual RMA mode */
+	kvm->arch.host_lpid = mfspr(SPRN_LPID);
+	kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
+	lpcr &= LPCR_PECE | LPCR_LPES;
+	lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
+		LPCR_VPM0 | LPCR_VPM1;
+	kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
+		(VRMA_VSID << SLB_VSID_SHIFT_1T);
+	/* On POWER8 turn on online bit to enable PURR/SPURR */
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		lpcr |= LPCR_ONL;
 	kvm->arch.lpcr = lpcr;
 
-	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
-	spin_lock_init(&kvm->arch.slot_phys_lock);
-
 	/*
 	 * Track that we now have a HV mode VM active. This blocks secondary
 	 * CPU threads from coming online.
@@ -2507,10 +2370,6 @@ static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
 	kvm_hv_vm_deactivated();
 
 	kvmppc_free_vcores(kvm);
-	if (kvm->arch.rma) {
-		kvm_release_rma(kvm->arch.rma);
-		kvm->arch.rma = NULL;
-	}
 
 	kvmppc_free_hpt(kvm);
 }
@@ -2536,7 +2395,8 @@ static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
 
 static int kvmppc_core_check_processor_compat_hv(void)
 {
-	if (!cpu_has_feature(CPU_FTR_HVMODE))
+	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+	    !cpu_has_feature(CPU_FTR_ARCH_206))
 		return -EIO;
 	return 0;
 }
@@ -2550,16 +2410,6 @@ static long kvm_arch_vm_ioctl_hv(struct file *filp,
 
 	switch (ioctl) {
 
-	case KVM_ALLOCATE_RMA: {
-		struct kvm_allocate_rma rma;
-		struct kvm *kvm = filp->private_data;
-
-		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
-		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
-			r = -EFAULT;
-		break;
-	}
-
 	case KVM_PPC_ALLOCATE_HTAB: {
 		u32 htab_order;
 