@@ -123,6 +123,32 @@ static bool no_mixing_hpt_and_radix;
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
+/*
+ * RWMR values for POWER8. These control the rate at which PURR
+ * and SPURR count and should be set according to the number of
+ * online threads in the vcore being run.
+ */
+#define RWMR_RPA_P8_1THREAD	0x164520C62609AECA
+#define RWMR_RPA_P8_2THREAD	0x7FFF2908450D8DA9
+#define RWMR_RPA_P8_3THREAD	0x164520C62609AECA
+#define RWMR_RPA_P8_4THREAD	0x199A421245058DA9
+#define RWMR_RPA_P8_5THREAD	0x164520C62609AECA
+#define RWMR_RPA_P8_6THREAD	0x164520C62609AECA
+#define RWMR_RPA_P8_7THREAD	0x164520C62609AECA
+#define RWMR_RPA_P8_8THREAD	0x164520C62609AECA
+
+static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
+	RWMR_RPA_P8_1THREAD,
+	RWMR_RPA_P8_1THREAD,
+	RWMR_RPA_P8_2THREAD,
+	RWMR_RPA_P8_3THREAD,
+	RWMR_RPA_P8_4THREAD,
+	RWMR_RPA_P8_5THREAD,
+	RWMR_RPA_P8_6THREAD,
+	RWMR_RPA_P8_7THREAD,
+	RWMR_RPA_P8_8THREAD,
+};
+
 static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
 			int *ip)
 {
@@ -1761,7 +1787,12 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
 				vcpu->arch.vcore->tb_offset;
 		break;
 	case KVM_REG_PPC_ONLINE:
-		vcpu->arch.online = set_reg_val(id, *val);
+		i = set_reg_val(id, *val);
+		if (i && !vcpu->arch.online)
+			atomic_inc(&vcpu->arch.vcore->online_count);
+		else if (!i && vcpu->arch.online)
+			atomic_dec(&vcpu->arch.vcore->online_count);
+		vcpu->arch.online = i;
 		break;
 	default:
 		r = -EINVAL;
@@ -2856,6 +2887,25 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 		}
 	}
 
+	/*
+	 * On POWER8, set RWMR register.
+	 * Since it only affects PURR and SPURR, it doesn't affect
+	 * the host, so we don't save/restore the host value.
+	 */
+	if (is_power8) {
+		unsigned long rwmr_val = RWMR_RPA_P8_8THREAD;
+		int n_online = atomic_read(&vc->online_count);
+
+		/*
+		 * Use the 8-thread value if we're doing split-core
+		 * or if the vcore's online count looks bogus.
+		 */
+		if (split == 1 && threads_per_subcore == MAX_SMT_THREADS &&
+		    n_online >= 1 && n_online <= MAX_SMT_THREADS)
+			rwmr_val = p8_rwmr_values[n_online];
+		mtspr(SPRN_RWMR, rwmr_val);
+	}
+
 	/* Start all the threads */
 	active = 0;
 	for (sub = 0; sub < core_info.n_subcores; ++sub) {
@@ -3358,6 +3408,15 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	}
 #endif
 
+	/*
+	 * Force online to 1 for the sake of old userspace which doesn't
+	 * set it.
+	 */
+	if (!vcpu->arch.online) {
+		atomic_inc(&vcpu->arch.vcore->online_count);
+		vcpu->arch.online = 1;
+	}
+
 	kvmppc_core_prepare_to_enter(vcpu);
 
 	/* No need to go into the guest when all we'll do is come back out */
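
As a review aid, here is a minimal, self-contained userspace sketch (not part of the patch) of the RWMR selection logic that kvmppc_run_core() gains above. The constants, the p8_rwmr_values[] table and the fall-back-to-8-threads rule are taken directly from the hunks above; the helper name pick_p8_rwmr(), the whole_core flag (standing in for the kernel's split == 1 && threads_per_subcore == MAX_SMT_THREADS test) and main() are illustrative additions, and ULL-suffixed literals are used only so the sketch also builds on 32-bit hosts.

/* pick_rwmr.c - illustrative sketch only; mirrors the selection in kvmppc_run_core() */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SMT_THREADS		8

#define RWMR_RPA_P8_1THREAD	0x164520C62609AECAULL
#define RWMR_RPA_P8_2THREAD	0x7FFF2908450D8DA9ULL
#define RWMR_RPA_P8_3THREAD	0x164520C62609AECAULL
#define RWMR_RPA_P8_4THREAD	0x199A421245058DA9ULL
#define RWMR_RPA_P8_5THREAD	0x164520C62609AECAULL
#define RWMR_RPA_P8_6THREAD	0x164520C62609AECAULL
#define RWMR_RPA_P8_7THREAD	0x164520C62609AECAULL
#define RWMR_RPA_P8_8THREAD	0x164520C62609AECAULL

/* Index 0 is never used; index n holds the value for n online threads. */
static const unsigned long long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
	RWMR_RPA_P8_1THREAD,
	RWMR_RPA_P8_1THREAD,
	RWMR_RPA_P8_2THREAD,
	RWMR_RPA_P8_3THREAD,
	RWMR_RPA_P8_4THREAD,
	RWMR_RPA_P8_5THREAD,
	RWMR_RPA_P8_6THREAD,
	RWMR_RPA_P8_7THREAD,
	RWMR_RPA_P8_8THREAD,
};

/*
 * whole_core stands in for the kernel's
 * "split == 1 && threads_per_subcore == MAX_SMT_THREADS" check: when the
 * core is being split, or the online count is out of range, fall back to
 * the conservative 8-thread value, as the patch does.
 */
static unsigned long long pick_p8_rwmr(int n_online, bool whole_core)
{
	unsigned long long rwmr_val = RWMR_RPA_P8_8THREAD;

	if (whole_core && n_online >= 1 && n_online <= MAX_SMT_THREADS)
		rwmr_val = p8_rwmr_values[n_online];
	return rwmr_val;	/* the kernel then does mtspr(SPRN_RWMR, rwmr_val) */
}

int main(void)
{
	for (int n = 0; n <= MAX_SMT_THREADS; ++n)
		printf("%d online thread(s) -> RWMR %#llx\n",
		       n, pick_p8_rwmr(n, true));
	return 0;
}

Compiling and running this prints the value that would be written to SPRN_RWMR for each online-thread count; note that with the values defined in this patch, only the 2- and 4-thread counts select a value different from the 8-thread default.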