@@ -95,6 +95,23 @@ module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
 MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
 #endif
 
+/* Maximum halt poll interval defaults to KVM_HALT_POLL_NS_DEFAULT */
+static unsigned int halt_poll_max_ns = KVM_HALT_POLL_NS_DEFAULT;
+module_param(halt_poll_max_ns, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(halt_poll_max_ns, "Maximum halt poll time in ns");
+
+/* Factor by which the vcore halt poll interval is grown, default is to double
+ */
+static unsigned int halt_poll_ns_grow = 2;
+module_param(halt_poll_ns_grow, uint, S_IRUGO);
+MODULE_PARM_DESC(halt_poll_ns_grow, "Factor halt poll time is grown by");
+
+/* Factor by which the vcore halt poll interval is shrunk, default is to reset
+ */
+static unsigned int halt_poll_ns_shrink;
+module_param(halt_poll_ns_shrink, uint, S_IRUGO);
+MODULE_PARM_DESC(halt_poll_ns_shrink, "Factor halt poll time is shrunk by");
+
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
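The three tunables interact simply: halt_poll_max_ns caps the per-vcore poll window, halt_poll_ns_grow multiplies it when a wakeup was missed, and halt_poll_ns_shrink divides it (with 0 meaning reset the window to 0). As a minimal user-space sketch of that arithmetic, with illustrative local stand-ins for the module parameters (the halt_poll_max_ns value here is arbitrary, not KVM_HALT_POLL_NS_DEFAULT):

#include <stdio.h>

/* Illustrative stand-ins for the module parameters above; plain C,
 * no kernel dependencies.
 */
static unsigned int halt_poll_max_ns = 500000;	/* arbitrary cap */
static unsigned int halt_poll_ns_grow = 2;	/* default: double */
static unsigned int halt_poll_ns_shrink;	/* default 0: reset to 0 */

static unsigned int grow(unsigned int cur)
{
	/* 10us base, mirroring grow_halt_poll_ns() below */
	if (cur == 0 && halt_poll_ns_grow)
		cur = 10000;
	else
		cur *= halt_poll_ns_grow;
	return cur > halt_poll_max_ns ? halt_poll_max_ns : cur;
}

static unsigned int shrink(unsigned int cur)
{
	return halt_poll_ns_shrink ? cur / halt_poll_ns_shrink : 0;
}

int main(void)
{
	unsigned int ns = 0;
	int i;

	for (i = 0; i < 8; i++) {	/* 0, 10000, 20000, ... capped at max */
		ns = grow(ns);
		printf("grow %d: %u ns\n", i, ns);
	}
	printf("shrink: %u ns\n", shrink(ns));
	return 0;
}

Starting growth from a 10us base rather than from 1ns means a single grow step already yields a useful poll window.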
@@ -2621,32 +2638,82 @@ static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
 	finish_wait(&vcpu->arch.cpu_run, &wait);
 }
 
+static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
+{
+	/* 10us base */
+	if (vc->halt_poll_ns == 0 && halt_poll_ns_grow)
+		vc->halt_poll_ns = 10000;
+	else
+		vc->halt_poll_ns *= halt_poll_ns_grow;
+
+	if (vc->halt_poll_ns > halt_poll_max_ns)
+		vc->halt_poll_ns = halt_poll_max_ns;
+}
+
+static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
+{
+	if (halt_poll_ns_shrink == 0)
+		vc->halt_poll_ns = 0;
+	else
+		vc->halt_poll_ns /= halt_poll_ns_shrink;
+}
+
+/* Check to see if any of the runnable vcpus on the vcore have pending
+ * exceptions or are no longer ceded
+ */
+static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
+{
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	for_each_runnable_thread(i, vcpu, vc) {
+		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded)
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * All the vcpus in this vcore are idle, so wait for a decrementer
  * or external interrupt to one of the vcpus. vc->lock is held.
  */
 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 {
-	struct kvm_vcpu *vcpu;
-	int do_sleep = 1, i;
+	int do_sleep = 1;
+	ktime_t cur, start;
+	u64 block_ns;
 	DECLARE_SWAITQUEUE(wait);
 
-	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+	/* Poll for pending exceptions and ceded state */
+	cur = start = ktime_get();
+	if (vc->halt_poll_ns) {
+		ktime_t stop = ktime_add_ns(start, vc->halt_poll_ns);
 
-	/*
-	 * Check one last time for pending exceptions and ceded state after
-	 * we put ourselves on the wait queue
-	 */
-	for_each_runnable_thread(i, vcpu, vc) {
-		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) {
-			do_sleep = 0;
-			break;
-		}
+		vc->vcore_state = VCORE_POLLING;
+		spin_unlock(&vc->lock);
+
+		do {
+			if (kvmppc_vcore_check_block(vc)) {
+				do_sleep = 0;
+				break;
+			}
+			cur = ktime_get();
+		} while (single_task_running() && ktime_before(cur, stop));
+
+		spin_lock(&vc->lock);
+		vc->vcore_state = VCORE_INACTIVE;
+
+		if (!do_sleep)
+			goto out;
 	}
 
-	if (!do_sleep) {
+	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+
+	if (kvmppc_vcore_check_block(vc)) {
 		finish_swait(&vc->wq, &wait);
-		return;
+		do_sleep = 0;
+		goto out;
 	}
 
 	vc->vcore_state = VCORE_SLEEPING;
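The rewritten kvmppc_vcore_blocked() follows the usual poll-before-sleep shape: busy-poll for up to vc->halt_poll_ns with the vcore lock dropped, re-check under the lock, and only then go onto the swait queue. A rough user-space analogue of that shape, assuming pthreads in place of the vcore lock and swait queue and an atomic flag in place of kvmppc_vcore_check_block() (all names here are hypothetical); the kernel version additionally abandons the poll loop when the runqueue stops being idle, via single_task_running(), which has no direct analogue here:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

/* Hypothetical stand-ins: 'lock' plays the role of vc->lock, 'wq' the
 * swait queue, and 'work_pending' the kvmppc_vcore_check_block() result.
 */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;
static atomic_int work_pending;

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Called with 'lock' held, like kvmppc_vcore_blocked() with vc->lock.
 * Returns 1 if work turned up before we committed to sleeping.
 */
static int blocked_wait(uint64_t poll_ns)
{
	if (poll_ns) {
		uint64_t stop = now_ns() + poll_ns;

		pthread_mutex_unlock(&lock);	/* poll without the lock */
		do {
			if (atomic_load(&work_pending)) {
				pthread_mutex_lock(&lock);
				return 1;	/* poll succeeded, skip the sleep */
			}
		} while (now_ns() < stop);
		pthread_mutex_lock(&lock);
	}

	/* Last check under the lock before actually blocking */
	if (atomic_load(&work_pending))
		return 1;

	pthread_cond_wait(&wq, &lock);		/* sleep until a waker signals */
	return 0;
}

Dropping the lock for the poll loop matters: a successful poll must be able to observe state that another thread can only update while holding it.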
@@ -2657,6 +2724,27 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 	spin_lock(&vc->lock);
 	vc->vcore_state = VCORE_INACTIVE;
 	trace_kvmppc_vcore_blocked(vc, 1);
+
+	cur = ktime_get();
+
+out:
+	block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+
+	/* Adjust poll time */
+	if (halt_poll_max_ns) {
+		if (block_ns <= vc->halt_poll_ns)
+			;
+		/* We slept and blocked for longer than the max halt time */
+		else if (vc->halt_poll_ns && block_ns > halt_poll_max_ns)
+			shrink_halt_poll_ns(vc);
+		/* We slept and our poll time is too small */
+		else if (vc->halt_poll_ns < halt_poll_max_ns &&
+			 block_ns < halt_poll_max_ns)
+			grow_halt_poll_ns(vc);
+	} else
+		vc->halt_poll_ns = 0;
+
+	trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
 }
 
 static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
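The block under the out: label is the adaptive part: block_ns measures how long the vcore actually waited (polling plus sleeping), and the poll window grows or shrinks depending on where that falls relative to the current window and halt_poll_max_ns. The same decision table, restated as a standalone sketch (stand-in types and an arbitrary parameter value, not the kernel code):

#include <stdint.h>

/* Illustrative stand-in for the module parameter; value arbitrary */
static uint64_t halt_poll_max_ns = 500000;

enum poll_action { POLL_KEEP, POLL_SHRINK, POLL_GROW, POLL_DISABLE };

/* Restatement of the out: adjustment above: cur_poll_ns is the vcore's
 * current poll window, block_ns how long it actually waited this time.
 */
static enum poll_action adjust(uint64_t cur_poll_ns, uint64_t block_ns)
{
	if (!halt_poll_max_ns)
		return POLL_DISABLE;	/* polling switched off: clear window */
	if (block_ns <= cur_poll_ns)
		return POLL_KEEP;	/* window already covered this wakeup */
	if (cur_poll_ns && block_ns > halt_poll_max_ns)
		return POLL_SHRINK;	/* polled and still slept a long time */
	if (cur_poll_ns < halt_poll_max_ns && block_ns < halt_poll_max_ns)
		return POLL_GROW;	/* wakeup landed just past the window */
	return POLL_KEEP;
}

Note that writing 0 to halt_poll_max_ns both disables polling and clears any window a vcore had already built up, matching the else branch in the patch.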