@@ -172,6 +172,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvmppc_primary_no_guest:
/* We handle this much like a ceded vcpu */
+ /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
+ mfspr r3, SPRN_HDEC
+ mtspr SPRN_DEC, r3
+ /*
+ * Make sure the primary has finished the MMU switch.
+ * We should never get here on a secondary thread, but
+ * check it for robustness' sake.
+ */
+ ld r5, HSTATE_KVM_VCORE(r13)
+65: lbz r0, VCORE_IN_GUEST(r5)
+ cmpwi r0, 0
+ beq 65b
+ /* Set LPCR. */
+ ld r8,VCORE_LPCR(r5)
+ mtspr SPRN_LPCR,r8
+ isync
/* set our bit in napping_threads */
ld r5, HSTATE_KVM_VCORE(r13)
lbz r7, HSTATE_PTID(r13)
@@ -182,7 +198,7 @@ kvmppc_primary_no_guest:
or r3, r3, r0
stwcx. r3, 0, r6
bne 1b
- /* order napping_threads update vs testing entry_exit_count */
+ /* order napping_threads update vs testing entry_exit_map */
isync
li r12, 0
lwz r7, VCORE_ENTRY_EXIT(r5)
@@ -191,6 +207,7 @@ kvmppc_primary_no_guest:
li r3, NAPPING_NOVCPU
stb r3, HSTATE_NAPPING(r13)

+ li r3, 0 /* Don't wake on privileged (OS) doorbell */
b kvm_do_nap

kvm_novcpu_wakeup:
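
The napping-threads bookkeeping above is a classic load-reserve/store-conditional read-modify-write: set our thread's bit, then make sure that update is ordered before re-reading the vcore's entry/exit word. A kernel-style C sketch of the same protocol (the helper name is illustrative; smp_mb() stands in for the isync ordering):

static bool mark_napping_then_check_exit(struct kvmppc_vcore *vc, int ptid)
{
	unsigned int old, new;

	do {				/* the lwarx/stwcx. retry loop */
		old = vc->napping_threads;
		new = old | (1u << ptid);
	} while (cmpxchg(&vc->napping_threads, old, new) != old);

	smp_mb();			/* order the update vs. the test below */
	return vc->entry_exit_map >= 0x100;	/* true: take the exit path */
}
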
@@ -202,7 +219,7 @@ kvm_novcpu_wakeup:

/* check the wake reason */
bl kvmppc_check_wake_reason
-
+
/* see if any other thread is already exiting */
lwz r0, VCORE_ENTRY_EXIT(r5)
cmpwi r0, 0x100
@@ -222,13 +239,37 @@ kvm_novcpu_wakeup:
cmpdi r3, 0
bge kvm_novcpu_exit

+ /* See if our timeslice has expired (HDEC is negative) */
+ mfspr r0, SPRN_HDEC
+ li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
+ cmpwi r0, 0
+ blt kvm_novcpu_exit
+
/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
ld r4, HSTATE_KVM_VCPU(r13)
cmpdi r4, 0
- bne kvmppc_got_guest
+ beq kvmppc_primary_no_guest
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r4, VCPU_TB_RMENTRY
+ bl kvmhv_start_timing
+#endif
+ b kvmppc_got_guest

kvm_novcpu_exit:
- b hdec_soon
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ ld r4, HSTATE_KVM_VCPU(r13)
+ cmpdi r4, 0
+ beq 13f
+ addi r3, r4, VCPU_TB_RMEXIT
+ bl kvmhv_accumulate_time
+#endif
+13: mr r3, r12
+ stw r12, 112-4(r1)
+ bl kvmhv_commence_exit
+ nop
+ lwz r12, 112-4(r1)
+ b kvmhv_switch_to_host

/*
* We come in here when wakened from nap mode.
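
Taken together, the wakeup path above is a small decision chain; condensed into C it looks roughly like this (names are illustrative; the 0x100 test is the "any thread exiting?" check on the entry/exit word):

enum novcpu_action { NOVCPU_EXIT, NOVCPU_NAP_AGAIN, NOVCPU_RUN_GUEST };

static enum novcpu_action novcpu_wakeup_action(struct kvmppc_vcore *vc,
					       struct kvm_vcpu *vcpu,
					       long wake_reason, long hdec)
{
	if (vc->entry_exit_map >= 0x100) /* another thread already exiting */
		return NOVCPU_EXIT;
	if (wake_reason >= 0)		/* host work pending */
		return NOVCPU_EXIT;
	if (hdec < 0)			/* timeslice expired */
		return NOVCPU_EXIT;
	return vcpu ? NOVCPU_RUN_GUEST	/* a latecomer vcpu was assigned */
		    : NOVCPU_NAP_AGAIN;	/* back to kvmppc_primary_no_guest */
}
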
@@ -239,9 +280,9 @@ kvm_novcpu_exit:
kvm_start_guest:

/* Set runlatch bit the minute you wake up from nap */
- mfspr r1, SPRN_CTRLF
- ori r1, r1, 1
- mtspr SPRN_CTRLT, r1
+ mfspr r0, SPRN_CTRLF
+ ori r0, r0, 1
+ mtspr SPRN_CTRLT, r0

ld r2,PACATOC(r13)

@@ -286,26 +327,21 @@ kvm_secondary_got_guest:
ld r6, PACA_DSCR(r13)
std r6, HSTATE_DSCR(r13)

+ /* Order load of vcore, ptid etc. after load of vcpu */
+ lwsync
bl kvmppc_hv_entry

/* Back from the guest, go back to nap */
/* Clear our vcpu pointer so we don't come back in early */
li r0, 0
- std r0, HSTATE_KVM_VCPU(r13)
/*
- * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
- * the nap_count, because once the increment to nap_count is
- * visible we could be given another vcpu.
+ * Once we clear HSTATE_KVM_VCPU(r13), the code in
+ * kvmppc_run_core() is going to assume that all our vcpu
+ * state is visible in memory. This lwsync makes sure
+ * that that is true.
*/
lwsync
-
- /* increment the nap count and then go to nap mode */
- ld r4, HSTATE_KVM_VCORE(r13)
- addi r4, r4, VCORE_NAP_COUNT
-51: lwarx r3, 0, r4
- addi r3, r3, 1
- stwcx. r3, 0, r4
- bne 51b
+ std r0, HSTATE_KVM_VCPU(r13)

/*
* At this point we have finished executing in the guest.
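
The rewritten comment describes a publish/consume pairing: the secondary must make all of its vcpu state visible before the cleared vcpu pointer is, because kvmppc_run_core() treats a NULL pointer as permission to hand the thread a new vcpu. A simplified C sketch of the writer side (smp_wmb() is the lwsync):

static void secondary_release_vcpu(struct paca_struct *paca)
{
	/* ... all vcpu state has been stored back ... */
	smp_wmb();			/* the lwsync above */
	paca->kvm_hstate.kvm_vcpu = NULL; /* we may now be given a new vcpu */
}
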
@@ -376,6 +412,14 @@ kvmppc_hv_entry:
li r6, KVM_GUEST_MODE_HOST_HV
stb r6, HSTATE_IN_GUEST(r13)

+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ /* Store initial timestamp */
+ cmpdi r4, 0
+ beq 1f
+ addi r3, r4, VCPU_TB_RMENTRY
+ bl kvmhv_start_timing
+1:
+#endif
/* Clear out SLB */
li r6,0
slbmte r6,r6
@@ -387,21 +431,23 @@
* We don't have to lock against concurrent tlbies,
* but we do have to coordinate across hardware threads.
*/
- /* Increment entry count iff exit count is zero. */
- ld r5,HSTATE_KVM_VCORE(r13)
- addi r9,r5,VCORE_ENTRY_EXIT
-21: lwarx r3,0,r9
- cmpwi r3,0x100 /* any threads starting to exit? */
+ /* Set bit in entry map iff exit map is zero. */
+ ld r5, HSTATE_KVM_VCORE(r13)
+ li r7, 1
+ lbz r6, HSTATE_PTID(r13)
+ sld r7, r7, r6
+ addi r9, r5, VCORE_ENTRY_EXIT
+21: lwarx r3, 0, r9
+ cmpwi r3, 0x100 /* any threads starting to exit? */
bge secondary_too_late /* if so we're too late to the party */
- addi r3,r3,1
- stwcx. r3,0,r9
+ or r3, r3, r7
+ stwcx. r3, 0, r9
bne 21b

/* Primary thread switches to guest partition. */
ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
- lbz r6,HSTATE_PTID(r13)
cmpwi r6,0
- bne 20f
+ bne 10f
ld r6,KVM_SDR1(r9)
lwz r7,KVM_LPID(r9)
li r0,LPID_RSVD /* switch to reserved LPID */
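
The word at VCORE_ENTRY_EXIT is now two packed per-thread bitmaps rather than two counters: entry bits in the low byte, exit bits starting at bit 8 (hence the 0x100 test). A sketch of the entry-side update, assuming that layout:

static bool try_set_entry_bit(struct kvmppc_vcore *vc, int ptid)
{
	unsigned int old, new;

	do {				/* lwarx ... stwcx. */
		old = vc->entry_exit_map;
		if (old >= 0x100)	/* any exit bit set already? */
			return false;	/* -> secondary_too_late */
		new = old | (1u << ptid);
	} while (cmpxchg(&vc->entry_exit_map, old, new) != old);
	return true;
}
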
@@ -472,28 +518,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

li r0,1
stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
- b 10f
-
- /* Secondary threads wait for primary to have done partition switch */
-20: lbz r0,VCORE_IN_GUEST(r5)
- cmpwi r0,0
- beq 20b
-
- /* Set LPCR and RMOR. */
-10: ld r8,VCORE_LPCR(r5)
- mtspr SPRN_LPCR,r8
- ld r8,KVM_RMOR(r9)
- mtspr SPRN_RMOR,r8
- isync
-
- /* Check if HDEC expires soon */
- mfspr r3,SPRN_HDEC
- cmpwi r3,512 /* 1 microsecond */
- li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
- blt hdec_soon

/* Do we have a guest vcpu to run? */
- cmpdi r4, 0
+10: cmpdi r4, 0
beq kvmppc_primary_no_guest
kvmppc_got_guest:

@@ -818,6 +845,30 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
clrrdi r6,r6,1
mtspr SPRN_CTRLT,r6
4:
+ /* Secondary threads wait for primary to have done partition switch */
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lbz r6, HSTATE_PTID(r13)
+ cmpwi r6, 0
+ beq 21f
+ lbz r0, VCORE_IN_GUEST(r5)
+ cmpwi r0, 0
+ bne 21f
+ HMT_LOW
+20: lbz r0, VCORE_IN_GUEST(r5)
+ cmpwi r0, 0
+ beq 20b
+ HMT_MEDIUM
+21:
+ /* Set LPCR. */
+ ld r8,VCORE_LPCR(r5)
+ mtspr SPRN_LPCR,r8
+ isync
+
+ /* Check if HDEC expires soon */
+ mfspr r3, SPRN_HDEC
+ cmpwi r3, 512 /* 1 microsecond */
+ blt hdec_soon
+
ld r6, VCPU_CTR(r4)
lwz r7, VCPU_XER(r4)

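Secondaries now wait for the primary's partition switch on the guest-entry path itself, spinning at low SMT priority so the busy-wait steals few cycles from sibling threads. The C equivalent of the HMT_LOW/HMT_MEDIUM spin (a sketch):

static void wait_for_partition_switch(struct kvmppc_vcore *vc, int ptid)
{
	if (ptid == 0 || vc->in_guest)
		return;		/* primary, or switch already done */
	HMT_low();		/* drop SMT priority while spinning */
	while (!vc->in_guest)
		barrier();	/* primary sets in_guest after the MMU switch */
	HMT_medium();		/* restore priority before guest entry */
}
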
@@ -880,6 +931,12 @@ fast_guest_return:
li r9, KVM_GUEST_MODE_GUEST_HV
stb r9, HSTATE_IN_GUEST(r13)

+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ /* Accumulate timing */
+ addi r3, r4, VCPU_TB_GUEST
+ bl kvmhv_accumulate_time
+#endif
+
/* Enter guest */

BEGIN_FTR_SECTION
@@ -917,6 +974,27 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
hrfid
b .

+secondary_too_late:
+ li r12, 0
+ cmpdi r4, 0
+ beq 11f
+ stw r12, VCPU_TRAP(r4)
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r4, VCPU_TB_RMEXIT
+ bl kvmhv_accumulate_time
+#endif
+11: b kvmhv_switch_to_host
+
+hdec_soon:
+ li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
+ stw r12, VCPU_TRAP(r4)
+ mr r9, r4
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r4, VCPU_TB_RMEXIT
+ bl kvmhv_accumulate_time
+#endif
+ b guest_exit_cont
+
/******************************************************************************
* *
* Exit code *
@@ -1002,6 +1080,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

stw r12,VCPU_TRAP(r9)

+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r9, VCPU_TB_RMINTR
+ mr r4, r9
+ bl kvmhv_accumulate_time
+ ld r5, VCPU_GPR(R5)(r9)
+ ld r6, VCPU_GPR(R6)(r9)
+ ld r7, VCPU_GPR(R7)(r9)
+ ld r8, VCPU_GPR(R8)(r9)
+#endif
+
/* Save HEIR (HV emulation assist reg) in emul_inst
if this is an HEI (HV emulation interrupt, e40) */
li r3,KVM_INST_FETCH_FAILED
@@ -1028,34 +1116,37 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
bne 2f
mfspr r3,SPRN_HDEC
cmpwi r3,0
- bge ignore_hdec
+ mr r4,r9
+ bge fast_guest_return
2:
/* See if this is an hcall we can handle in real mode */
cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
beq hcall_try_real_mode

+ /* Hypervisor doorbell - exit only if host IPI flag set */
+ cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
+ bne 3f
+ lbz r0, HSTATE_HOST_IPI(r13)
+ beq 4f
+ b guest_exit_cont
+3:
/* External interrupt ? */
cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
- bne+ ext_interrupt_to_host
+ bne+ guest_exit_cont

/* External interrupt, first check for host_ipi. If this is
* set, we know the host wants us out so let's do it now
*/
bl kvmppc_read_intr
cmpdi r3, 0
- bgt ext_interrupt_to_host
+ bgt guest_exit_cont

/* Check if any CPU is heading out to the host, if so head out too */
- ld r5, HSTATE_KVM_VCORE(r13)
+4: ld r5, HSTATE_KVM_VCORE(r13)
lwz r0, VCORE_ENTRY_EXIT(r5)
cmpwi r0, 0x100
- bge ext_interrupt_to_host
-
- /* Return to guest after delivering any pending interrupt */
mr r4, r9
- b deliver_guest_interrupt
-
-ext_interrupt_to_host:
+ blt deliver_guest_interrupt

guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
/* Save more register state */
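
The new hypervisor-doorbell case changes the exit policy: a doorbell (vector 0xe80) only forces a host exit when the host-IPI flag is set; otherwise control falls through to the shared "is anyone exiting?" check at label 4: and the thread reenters the guest. As a predicate (sketch, illustrative helper name):

static bool doorbell_should_exit(struct paca_struct *paca,
				 struct kvmppc_vcore *vc)
{
	if (paca->kvm_hstate.host_ipi)
		return true;		/* host wants this thread back */
	return vc->entry_exit_map >= 0x100; /* exit only if others are leaving */
}
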
@@ -1065,7 +1156,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
stw r7, VCPU_DSISR(r9)
/* don't overwrite fault_dar/fault_dsisr if HDSI */
cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
- beq 6f
+ beq mc_cont
std r6, VCPU_FAULT_DAR(r9)
stw r7, VCPU_FAULT_DSISR(r9)

@@ -1073,9 +1164,20 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
beq machine_check_realmode
mc_cont:
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r9, VCPU_TB_RMEXIT
+ mr r4, r9
+ bl kvmhv_accumulate_time
+#endif
+
+ /* Increment exit count, poke other threads to exit */
+ bl kvmhv_commence_exit
+ nop
+ ld r9, HSTATE_KVM_VCPU(r13)
+ lwz r12, VCPU_TRAP(r9)

/* Save guest CTRL register, set runlatch to 1 */
-6: mfspr r6,SPRN_CTRLF
+ mfspr r6,SPRN_CTRLF
stw r6,VCPU_CTRL(r9)
andi. r0,r6,1
bne 4f
@@ -1417,68 +1519,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
slbia
ptesync

-hdec_soon: /* r12 = trap, r13 = paca */
/*
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
* have to coordinate the hardware threads.
*/
- /* Increment the threads-exiting-guest count in the 0xff00
- bits of vcore->entry_exit_count */
- ld r5,HSTATE_KVM_VCORE(r13)
- addi r6,r5,VCORE_ENTRY_EXIT
-41: lwarx r3,0,r6
- addi r0,r3,0x100
- stwcx. r0,0,r6
- bne 41b
- isync /* order stwcx. vs. reading napping_threads */
-
- /*
- * At this point we have an interrupt that we have to pass
- * up to the kernel or qemu; we can't handle it in real mode.
- * Thus we have to do a partition switch, so we have to
- * collect the other threads, if we are the first thread
- * to take an interrupt. To do this, we set the HDEC to 0,
- * which causes an HDEC interrupt in all threads within 2ns
- * because the HDEC register is shared between all 4 threads.
- * However, we don't need to bother if this is an HDEC
- * interrupt, since the other threads will already be on their
- * way here in that case.
- */
- cmpwi r3,0x100 /* Are we the first here? */
- bge 43f
- cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
- beq 40f
- li r0,0
- mtspr SPRN_HDEC,r0
-40:
- /*
- * Send an IPI to any napping threads, since an HDEC interrupt
- * doesn't wake CPUs up from nap.
- */
- lwz r3,VCORE_NAPPING_THREADS(r5)
- lbz r4,HSTATE_PTID(r13)
- li r0,1
- sld r0,r0,r4
- andc. r3,r3,r0 /* no sense IPI'ing ourselves */
- beq 43f
- /* Order entry/exit update vs. IPIs */
- sync
- mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
- subf r6,r4,r13
-42: andi. r0,r3,1
- beq 44f
- ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
- li r0,IPI_PRIORITY
- li r7,XICS_MFRR
- stbcix r0,r7,r8 /* trigger the IPI */
-44: srdi. r3,r3,1
- addi r6,r6,PACA_SIZE
- bne 42b
-
-secondary_too_late:
+kvmhv_switch_to_host:
/* Secondary threads wait for primary to do partition switch */
-43: ld r5,HSTATE_KVM_VCORE(r13)
+ ld r5,HSTATE_KVM_VCORE(r13)
ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
lbz r3,HSTATE_PTID(r13)
cmpwi r3,0
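
All of the removed exit-coordination assembly (bump the exit count, zero the shared HDEC, IPI the napping threads) moves into the C helper kvmhv_commence_exit called from the exit path. A rough sketch of its shape under the new entry_exit_map layout (this is an illustration, not the helper's actual body; the IPI step is omitted):

static void commence_exit_sketch(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	unsigned int old, new;

	do {				/* set our exit bit (bits 8-15) */
		old = vc->entry_exit_map;
		new = old | (0x100u << ptid);
	} while (cmpxchg(&vc->entry_exit_map, old, new) != old);

	if (old >= 0x100)
		return;			/* another thread is already herding */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		mtspr(SPRN_HDEC, 0);	/* shared HDEC: interrupt the siblings */
	/* napping threads don't see HDEC; they still need an IPI/msgsnd */
}
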
@@ -1562,6 +1610,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1: addi r8,r8,16
.endr

+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ /* Finish timing, if we have a vcpu */
+ ld r4, HSTATE_KVM_VCPU(r13)
+ cmpdi r4, 0
+ li r3, 0
+ beq 2f
+ bl kvmhv_accumulate_time
+2:
+#endif
/* Unset guest mode */
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
@@ -1696,8 +1753,10 @@ kvmppc_hisi:
* Returns to the guest if we handle it, or continues on up to
* the kernel if we can't (i.e. if we don't have a handler for
* it, or if the handler returns H_TOO_HARD).
+ *
+ * r5 - r8 contain hcall args,
+ * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
*/
- .globl hcall_try_real_mode
hcall_try_real_mode:
ld r3,VCPU_GPR(R3)(r9)
andi. r0,r11,MSR_PR
@@ -1839,13 +1898,124 @@ hcall_real_table:
.long 0 /* 0x12c */
.long 0 /* 0x130 */
.long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
+ .long 0 /* 0x138 */
+ .long 0 /* 0x13c */
+ .long 0 /* 0x140 */
+ .long 0 /* 0x144 */
+ .long 0 /* 0x148 */
+ .long 0 /* 0x14c */
+ .long 0 /* 0x150 */
+ .long 0 /* 0x154 */
+ .long 0 /* 0x158 */
+ .long 0 /* 0x15c */
+ .long 0 /* 0x160 */
+ .long 0 /* 0x164 */
+ .long 0 /* 0x168 */
+ .long 0 /* 0x16c */
+ .long 0 /* 0x170 */
+ .long 0 /* 0x174 */
+ .long 0 /* 0x178 */
+ .long 0 /* 0x17c */
+ .long 0 /* 0x180 */
+ .long 0 /* 0x184 */
+ .long 0 /* 0x188 */
+ .long 0 /* 0x18c */
+ .long 0 /* 0x190 */
+ .long 0 /* 0x194 */
+ .long 0 /* 0x198 */
+ .long 0 /* 0x19c */
+ .long 0 /* 0x1a0 */
+ .long 0 /* 0x1a4 */
+ .long 0 /* 0x1a8 */
+ .long 0 /* 0x1ac */
+ .long 0 /* 0x1b0 */
+ .long 0 /* 0x1b4 */
+ .long 0 /* 0x1b8 */
+ .long 0 /* 0x1bc */
+ .long 0 /* 0x1c0 */
+ .long 0 /* 0x1c4 */
+ .long 0 /* 0x1c8 */
+ .long 0 /* 0x1cc */
+ .long 0 /* 0x1d0 */
+ .long 0 /* 0x1d4 */
+ .long 0 /* 0x1d8 */
+ .long 0 /* 0x1dc */
+ .long 0 /* 0x1e0 */
+ .long 0 /* 0x1e4 */
+ .long 0 /* 0x1e8 */
+ .long 0 /* 0x1ec */
+ .long 0 /* 0x1f0 */
+ .long 0 /* 0x1f4 */
+ .long 0 /* 0x1f8 */
+ .long 0 /* 0x1fc */
+ .long 0 /* 0x200 */
+ .long 0 /* 0x204 */
+ .long 0 /* 0x208 */
+ .long 0 /* 0x20c */
+ .long 0 /* 0x210 */
+ .long 0 /* 0x214 */
+ .long 0 /* 0x218 */
+ .long 0 /* 0x21c */
+ .long 0 /* 0x220 */
+ .long 0 /* 0x224 */
+ .long 0 /* 0x228 */
+ .long 0 /* 0x22c */
+ .long 0 /* 0x230 */
+ .long 0 /* 0x234 */
+ .long 0 /* 0x238 */
+ .long 0 /* 0x23c */
+ .long 0 /* 0x240 */
+ .long 0 /* 0x244 */
+ .long 0 /* 0x248 */
+ .long 0 /* 0x24c */
+ .long 0 /* 0x250 */
+ .long 0 /* 0x254 */
+ .long 0 /* 0x258 */
+ .long 0 /* 0x25c */
+ .long 0 /* 0x260 */
+ .long 0 /* 0x264 */
+ .long 0 /* 0x268 */
+ .long 0 /* 0x26c */
+ .long 0 /* 0x270 */
+ .long 0 /* 0x274 */
+ .long 0 /* 0x278 */
+ .long 0 /* 0x27c */
+ .long 0 /* 0x280 */
+ .long 0 /* 0x284 */
+ .long 0 /* 0x288 */
+ .long 0 /* 0x28c */
+ .long 0 /* 0x290 */
+ .long 0 /* 0x294 */
+ .long 0 /* 0x298 */
+ .long 0 /* 0x29c */
+ .long 0 /* 0x2a0 */
+ .long 0 /* 0x2a4 */
+ .long 0 /* 0x2a8 */
+ .long 0 /* 0x2ac */
+ .long 0 /* 0x2b0 */
+ .long 0 /* 0x2b4 */
+ .long 0 /* 0x2b8 */
+ .long 0 /* 0x2bc */
+ .long 0 /* 0x2c0 */
+ .long 0 /* 0x2c4 */
+ .long 0 /* 0x2c8 */
+ .long 0 /* 0x2cc */
+ .long 0 /* 0x2d0 */
+ .long 0 /* 0x2d4 */
+ .long 0 /* 0x2d8 */
+ .long 0 /* 0x2dc */
+ .long 0 /* 0x2e0 */
+ .long 0 /* 0x2e4 */
+ .long 0 /* 0x2e8 */
+ .long 0 /* 0x2ec */
+ .long 0 /* 0x2f0 */
+ .long 0 /* 0x2f4 */
+ .long 0 /* 0x2f8 */
+ .long 0 /* 0x2fc */
+ .long DOTSYM(kvmppc_h_random) - hcall_real_table
.globl hcall_real_table_end
hcall_real_table_end:

-ignore_hdec:
- mr r4,r9
- b fast_guest_return
-
_GLOBAL(kvmppc_h_set_xdabr)
andi. r0, r5, DABRX_USER | DABRX_KERNEL
beq 6f
@@ -1884,7 +2054,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
li r3, 0
blr

-_GLOBAL(kvmppc_h_cede)
+_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
ori r11,r11,MSR_EE
std r11,VCPU_MSR(r3)
li r0,1
@@ -1893,8 +2063,8 @@ _GLOBAL(kvmppc_h_cede)
lbz r5,VCPU_PRODDED(r3)
cmpwi r5,0
bne kvm_cede_prodded
- li r0,0 /* set trap to 0 to say hcall is handled */
- stw r0,VCPU_TRAP(r3)
+ li r12,0 /* set trap to 0 to say hcall is handled */
+ stw r12,VCPU_TRAP(r3)
li r0,H_SUCCESS
std r0,VCPU_GPR(R3)(r3)

@@ -1912,12 +2082,11 @@ _GLOBAL(kvmppc_h_cede)
addi r6,r5,VCORE_NAPPING_THREADS
31: lwarx r4,0,r6
or r4,r4,r0
- PPC_POPCNTW(R7,R4)
- cmpw r7,r8
- bge kvm_cede_exit
+ cmpw r4,r8
+ beq kvm_cede_exit
stwcx. r4,0,r6
bne 31b
- /* order napping_threads update vs testing entry_exit_count */
+ /* order napping_threads update vs testing entry_exit_map */
isync
li r0,NAPPING_CEDE
stb r0,HSTATE_NAPPING(r13)
@@ -1954,22 +2123,53 @@ _GLOBAL(kvmppc_h_cede)
/* save FP state */
bl kvmppc_save_fp

+ /*
+ * Set DEC to the smaller of DEC and HDEC, so that we wake
+ * no later than the end of our timeslice (HDEC interrupts
+ * don't wake us from nap).
+ */
+ mfspr r3, SPRN_DEC
+ mfspr r4, SPRN_HDEC
+ mftb r5
+ cmpw r3, r4
+ ble 67f
+ mtspr SPRN_DEC, r4
+67:
+ /* save expiry time of guest decrementer */
+ extsw r3, r3
+ add r3, r3, r5
+ ld r4, HSTATE_KVM_VCPU(r13)
+ ld r5, HSTATE_KVM_VCORE(r13)
+ ld r6, VCORE_TB_OFFSET(r5)
+ subf r3, r6, r3 /* convert to host TB value */
+ std r3, VCPU_DEC_EXPIRES(r4)
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ ld r4, HSTATE_KVM_VCPU(r13)
+ addi r3, r4, VCPU_TB_CEDE
+ bl kvmhv_accumulate_time
+#endif
+
+ lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
+
/*
* Take a nap until a decrementer or external or doorbell interrupt
- * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
- * runlatch bit before napping.
+ * occurs, with PECE1 and PECE0 set in LPCR.
+ * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
+ * Also clear the runlatch bit before napping.
*/
kvm_do_nap:
- mfspr r2, SPRN_CTRLF
- clrrdi r2, r2, 1
- mtspr SPRN_CTRLT, r2
+ mfspr r0, SPRN_CTRLF
+ clrrdi r0, r0, 1
+ mtspr SPRN_CTRLT, r0

li r0,1
stb r0,HSTATE_HWTHREAD_REQ(r13)
mfspr r5,SPRN_LPCR
ori r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
- oris r5,r5,LPCR_PECEDP@h
+ ori r5, r5, LPCR_PECEDH
+ rlwimi r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_LPCR,r5
isync
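
On the cede path the hardware decrementer is clamped so nap ends no later than the timeslice, and the guest's DEC expiry is remembered in host timebase units so it can be reprogrammed on wakeup. The arithmetic in C (a sketch; dec_expires and tb_offset are the fields used above):

static void cede_set_wakeup(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
{
	long dec = (int)mfspr(SPRN_DEC);	/* sign-extend, as extsw does */
	long hdec = (int)mfspr(SPRN_HDEC);
	unsigned long tb = mftb();

	if (dec > hdec)			/* DEC = min(DEC, HDEC) */
		mtspr(SPRN_DEC, hdec);
	/* guest DEC expiry = now + remaining DEC, converted to host TB */
	vcpu->arch.dec_expires = dec + tb - vc->tb_offset;
}
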
@@ -1994,9 +2194,23 @@ kvm_end_cede:
/* Woken by external or decrementer interrupt */
ld r1, HSTATE_HOST_R1(r13)

+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+ addi r3, r4, VCPU_TB_RMINTR
+ bl kvmhv_accumulate_time
+#endif
+
/* load up FP state */
bl kvmppc_load_fp

+ /* Restore guest decrementer */
+ ld r3, VCPU_DEC_EXPIRES(r4)
+ ld r5, HSTATE_KVM_VCORE(r13)
+ ld r6, VCORE_TB_OFFSET(r5)
+ add r3, r3, r6 /* convert host TB to guest TB value */
+ mftb r7
+ subf r3, r7, r3
+ mtspr SPRN_DEC, r3
+
/* Load NV GPRS */
ld r14, VCPU_GPR(R14)(r4)
ld r15, VCPU_GPR(R15)(r4)
@@ -2057,7 +2271,8 @@ kvm_cede_prodded:

/* we've ceded but we want to give control to the host */
kvm_cede_exit:
- b hcall_real_fallback
+ ld r9, HSTATE_KVM_VCPU(r13)
+ b guest_exit_cont

/* Try to handle a machine check in real mode */
machine_check_realmode:
@@ -2089,13 +2304,14 @@ machine_check_realmode:

/*
* Check the reason we woke from nap, and take appropriate action.
- * Returns:
+ * Returns (in r3):
* 0 if nothing needs to be done
* 1 if something happened that needs to be handled by the host
- * -1 if there was a guest wakeup (IPI)
+ * -1 if there was a guest wakeup (IPI or msgsnd)
*
* Also sets r12 to the interrupt vector for any interrupt that needs
* to be handled now by the host (0x500 for external interrupt), or zero.
+ * Modifies r0, r6, r7, r8.
*/
kvmppc_check_wake_reason:
mfspr r6, SPRN_SRR1
@@ -2122,7 +2338,15 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

/* hypervisor doorbell */
3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
+ /* see if it's a host IPI */
li r3, 1
+ lbz r0, HSTATE_HOST_IPI(r13)
+ cmpwi r0, 0
+ bnelr
+ /* if not, clear it and return -1 */
+ lis r6, (PPC_DBELL_SERVER << (63-36))@h
+ PPC_MSGCLR(6)
+ li r3, -1
blr

/*
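The doorbell wake reason distinguishes host IPIs from guest msgsnd wakeups: only the former goes to the host, and a guest doorbell is explicitly cleared so it cannot immediately re-fire. In C shape (ppc_msgclr() here is a hypothetical wrapper around the PPC_MSGCLR instruction used above):

static int doorbell_wake_reason(struct paca_struct *paca)
{
	if (paca->kvm_hstate.host_ipi)
		return 1;		/* host IPI: hand the thread back */
	ppc_msgclr(PPC_DBELL_SERVER);	/* swallow the guest doorbell */
	return -1;			/* guest wakeup: reenter the guest */
}
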
@@ -2131,6 +2355,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* 0 if no interrupt is pending
* 1 if an interrupt is pending that needs to be handled by the host
* -1 if there was a guest wakeup IPI (which has now been cleared)
+ * Modifies r0, r6, r7, r8, returns value in r3.
*/
kvmppc_read_intr:
/* see if a host IPI is pending */
@@ -2185,6 +2410,7 @@ kvmppc_read_intr:
bne- 43f

/* OK, it's an IPI for us */
+ li r12, 0
li r3, -1
1: blr

@@ -2314,3 +2540,62 @@ kvmppc_fix_pmao:
mtspr SPRN_PMC6, r3
isync
blr
+
+#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
+/*
+ * Start timing an activity
+ * r3 = pointer to time accumulation struct, r4 = vcpu
+ */
+kvmhv_start_timing:
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lbz r6, VCORE_IN_GUEST(r5)
+ cmpwi r6, 0
+ beq 5f /* if in guest, need to */
+ ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
+5: mftb r5
+ subf r5, r6, r5
+ std r3, VCPU_CUR_ACTIVITY(r4)
+ std r5, VCPU_ACTIVITY_START(r4)
+ blr
+
+/*
+ * Accumulate time to one activity and start another.
+ * r3 = pointer to new time accumulation struct, r4 = vcpu
+ */
+kvmhv_accumulate_time:
+ ld r5, HSTATE_KVM_VCORE(r13)
+ lbz r8, VCORE_IN_GUEST(r5)
+ cmpwi r8, 0
+ beq 4f /* if in guest, need to */
+ ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
+4: ld r5, VCPU_CUR_ACTIVITY(r4)
+ ld r6, VCPU_ACTIVITY_START(r4)
+ std r3, VCPU_CUR_ACTIVITY(r4)
+ mftb r7
+ subf r7, r8, r7
+ std r7, VCPU_ACTIVITY_START(r4)
+ cmpdi r5, 0
+ beqlr
+ subf r3, r6, r7
+ ld r8, TAS_SEQCOUNT(r5)
+ cmpdi r8, 0
+ addi r8, r8, 1
+ std r8, TAS_SEQCOUNT(r5)
+ lwsync
+ ld r7, TAS_TOTAL(r5)
+ add r7, r7, r3
+ std r7, TAS_TOTAL(r5)
+ ld r6, TAS_MIN(r5)
+ ld r7, TAS_MAX(r5)
+ beq 3f
+ cmpd r3, r6
+ bge 1f
+3: std r3, TAS_MIN(r5)
+1: cmpd r3, r7
+ ble 2f
+ std r3, TAS_MAX(r5)
+2: lwsync
+ addi r8, r8, 1
+ std r8, TAS_SEQCOUNT(r5)
+ blr
+#endif
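
kvmhv_accumulate_time guards total/min/max with an odd/even sequence count, so a reader that sees an odd or changing count knows it raced with an update and retries. The writer side in kernel-style C (the struct layout is assumed from the TAS_* offsets; smp_wmb() stands in for the two lwsyncs):

struct time_accum {		/* assumed layout behind TAS_SEQCOUNT etc. */
	unsigned long seqcount;	/* odd while an update is in flight */
	unsigned long total;
	unsigned long min, max;
};

static void tas_accumulate(struct time_accum *tas, unsigned long delta)
{
	bool first = (tas->seqcount == 0);	/* the cmpdi r8, 0 above */

	tas->seqcount++;	/* odd: readers will retry */
	smp_wmb();

	tas->total += delta;
	if (first || delta < tas->min)
		tas->min = delta;
	if (delta > tas->max)
		tas->max = delta;

	smp_wmb();
	tas->seqcount++;	/* even again: snapshot is consistent */
}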