@@ -18,6 +18,7 @@
 #include <asm/hw_irq.h>
 #include <asm/kvm_book3s_asm.h>
 #include <asm/opal.h>
+#include <asm/cpuidle.h>
 
 #undef DEBUG
 
@@ -37,8 +38,7 @@
 
 /*
  * Pass requested state in r3:
- *	0 - nap
- *	1 - sleep
+ *	r3 - PNV_THREAD_NAP/SLEEP/WINKLE
  *
  * To check IRQ_HAPPENED in r4
  * 	0 - don't check
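A note on the new encoding: the requested state is now one of the PNV_THREAD_* constants from the asm/cpuidle.h header added above. A C sketch of the values this code appears to rely on (assumed to match the header introduced by this series; the ordering is what makes the `bge cr1,2f` dispatch in the next hunk work, since sleep and winkle both compare >= PNV_THREAD_SLEEP):

	/* Assumed to mirror the new asm/cpuidle.h. */
	#define PNV_THREAD_RUNNING		0
	#define PNV_THREAD_NAP			1
	#define PNV_THREAD_SLEEP		2
	#define PNV_THREAD_WINKLE		3

	/*
	 * Layout of the per-core word reached via PACA_CORE_IDLE_STATE_PTR:
	 * one bit per hardware thread, plus a lock bit that guards the
	 * fastsleep-workaround and timebase-resync critical sections.
	 */
	#define PNV_CORE_IDLE_LOCK_BIT		0x100
	#define PNV_CORE_IDLE_THREAD_BITS	0x0FF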
@@ -123,12 +123,58 @@ power7_enter_nap_mode:
 	li	r4,KVM_HWTHREAD_IN_NAP
 	stb	r4,HSTATE_HWTHREAD_STATE(r13)
 #endif
-	cmpwi	cr0,r3,1
-	beq	2f
+	stb	r3,PACA_THREAD_IDLE_STATE(r13)
+	cmpwi	cr1,r3,PNV_THREAD_SLEEP
+	bge	cr1,2f
 	IDLE_STATE_ENTER_SEQ(PPC_NAP)
 	/* No return */
-2:	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
-	/* No return */
+2:
+	/* Sleep or winkle */
+	lbz	r7,PACA_THREAD_MASK(r13)
+	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
+lwarx_loop1:
+	lwarx	r15,0,r14
+	andc	r15,r15,r7			/* Clear thread bit */
+
+	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
+
+/*
+ * If cr0 = 0, then the current thread is the last thread of the core entering
+ * sleep. The last thread needs to execute the hardware bug workaround code if
+ * required by the platform.
+ * Make the workaround call unconditionally here. The branch below is
+ * patched out when the idle states are discovered if the platform does not
+ * require the workaround.
+ */
+.global pnv_fastsleep_workaround_at_entry
+pnv_fastsleep_workaround_at_entry:
+	beq	fastsleep_workaround_at_entry
+
+	stwcx.	r15,0,r14
+	bne-	lwarx_loop1
+	isync
+
+common_enter: /* common code for all the threads entering sleep */
+	IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+
+fastsleep_workaround_at_entry:
+	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
+	stwcx.	r15,0,r14
+	bne-	lwarx_loop1
+	isync
+
+	/* Fast sleep workaround */
+	li	r3,1
+	li	r4,1
+	li	r0,OPAL_CONFIG_CPU_IDLE_STATE
+	bl	opal_call_realmode
+
+	/* Clear the lock bit */
+	li	r0,0
+	lwsync
+	stw	r0,0(r14)
+	b	common_enter
+
 
 _GLOBAL(power7_idle)
 	/* Now check if user or arch enabled NAP mode */
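In prose: each thread entering sleep or winkle atomically clears its bit in the shared per-core word, and the thread that clears the last bit also takes the lock bit, applies the fastsleep workaround via OPAL, then drops the lock before sleeping. A hypothetical C rendering of lwarx_loop1 and fastsleep_workaround_at_entry, using GCC atomics in place of lwarx/stwcx. and stand-in functions for the real-mode OPAL call and the idle-entry sequence (constants as assumed in the sketch above):

	#include <stdint.h>

	#define PNV_CORE_IDLE_LOCK_BIT		0x100	/* assumed, see above */
	#define PNV_CORE_IDLE_THREAD_BITS	0x0FF

	/* Stand-ins for the OPAL call and IDLE_STATE_ENTER_SEQ(PPC_SLEEP). */
	extern int64_t opal_config_cpu_idle_state(uint64_t state, uint64_t flag);
	extern void enter_sleep(void);			/* does not return */

	static void pnv_enter_sleep(uint32_t *core_idle_state, uint32_t thread_mask)
	{
		uint32_t old, new;

		do {
			old = __atomic_load_n(core_idle_state, __ATOMIC_RELAXED);
			/* Clear our thread bit; the andi. also strips the lock bit. */
			new = (old & ~thread_mask) & PNV_CORE_IDLE_THREAD_BITS;
			if (new == 0)			/* cr0 = 0: last thread in core */
				new |= PNV_CORE_IDLE_LOCK_BIT;
		} while (!__atomic_compare_exchange_n(core_idle_state, &old, new,
						      0, __ATOMIC_ACQUIRE,
						      __ATOMIC_RELAXED));

		if (new & PNV_CORE_IDLE_LOCK_BIT) {
			/* Last thread: apply the workaround (r3 = 1, r4 = 1). */
			opal_config_cpu_idle_state(1, 1);
			/* Drop the lock; all thread bits are already zero. */
			__atomic_store_n(core_idle_state, 0, __ATOMIC_RELEASE);
		}
		enter_sleep();
	}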
@@ -141,49 +187,16 @@ _GLOBAL(power7_idle)
 
 _GLOBAL(power7_nap)
 	mr	r4,r3
-	li	r3,0
+	li	r3,PNV_THREAD_NAP
 	b	power7_powersave_common
 	/* No return */
 
 _GLOBAL(power7_sleep)
-	li	r3,1
+	li	r3,PNV_THREAD_SLEEP
 	li	r4,1
 	b	power7_powersave_common
 	/* No return */
 
-/*
- * Make opal call in realmode. This is a generic function to be called
- * from realmode from reset vector. It handles endianess.
- *
- * r13 - paca pointer
- * r1 - stack pointer
- * r3 - opal token
- */
-opal_call_realmode:
-	mflr	r12
-	std	r12,_LINK(r1)
-	ld	r2,PACATOC(r13)
-	/* Set opal return address */
-	LOAD_REG_ADDR(r0,return_from_opal_call)
-	mtlr	r0
-	/* Handle endian-ness */
-	li	r0,MSR_LE
-	mfmsr	r12
-	andc	r12,r12,r0
-	mtspr	SPRN_HSRR1,r12
-	mr	r0,r3			/* Move opal token to r0 */
-	LOAD_REG_ADDR(r11,opal)
-	ld	r12,8(r11)
-	ld	r2,0(r11)
-	mtspr	SPRN_HSRR0,r12
-	hrfid
-
-return_from_opal_call:
-	FIXUP_ENDIAN
-	ld	r0,_LINK(r1)
-	mtlr	r0
-	blr
-
 #define CHECK_HMI_INTERRUPT			\
 	mfspr	r0,SPRN_SRR1;				\
 BEGIN_FTR_SECTION_NESTED(66);				\
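For context, the C-side prototypes of the entry points this hunk touches, as they are presumably declared elsewhere in the tree (the asm confirms only the register usage: the caller's r3 is the first argument, and power7_nap shuffles it into r4 as the IRQ_HAPPENED check flag):

	/* Assumed prototypes; the callers (e.g. the powernv cpuidle driver)
	 * are outside this diff. */
	extern void power7_idle(void);
	extern void power7_nap(int check_irq);	/* arg moves to r4; r3 = PNV_THREAD_NAP */
	extern void power7_sleep(void);		/* r3 = PNV_THREAD_SLEEP, r4 = 1 */

	/* Typical use from an idle loop: */
	static void enter_nap(void)
	{
		power7_nap(1);		/* nap, checking irq_happened on wake-up */
	}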
@@ -197,7 +210,7 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
 	ld	r2,PACATOC(r13);				\
 	ld	r1,PACAR1(r13);					\
 	std	r3,ORIG_GPR3(r1);	/* Save original r3 */	\
-	li	r3,OPAL_HANDLE_HMI;	/* Pass opal token argument*/	\
+	li	r0,OPAL_HANDLE_HMI;	/* Pass opal token argument*/	\
 	bl	opal_call_realmode;				\
 	ld	r3,ORIG_GPR3(r1);	/* Restore original r3 */	\
 20:	nop;
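This one-line change and the deletion above reflect a changed calling convention: opal_call_realmode (removed from this file and presumably now shared from elsewhere; the diff only shows the removal) takes the OPAL token in r0, freeing r3/r4 for real arguments. A small compilable sketch of the fastsleep-workaround call pair built on that convention; the symbolic OPAL_CONFIG_IDLE_* names are assumptions, and only the raw r3/r4 values (1/1 at entry, 1/0 at exit, the latter appearing in the final hunk) are visible in this diff:

	#include <stdint.h>

	/* Assumed names; the diff only shows the raw argument values. */
	#define OPAL_CONFIG_IDLE_FASTSLEEP	1	/* r3 at both call sites  */
	#define OPAL_CONFIG_IDLE_APPLY		1	/* r4 on the entry path   */
	#define OPAL_CONFIG_IDLE_UNDO		0	/* r4 on the wake-up path */

	/* Stand-in for loading r0 = OPAL_CONFIG_CPU_IDLE_STATE and branching
	 * to opal_call_realmode; returns the OPAL return code from r3. */
	extern int64_t opal_config_cpu_idle_state(uint64_t state, uint64_t flag);

	static inline int64_t fastsleep_workaround_apply(void)
	{
		return opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
						  OPAL_CONFIG_IDLE_APPLY);
	}

	static inline int64_t fastsleep_workaround_undo(void)
	{
		return opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
						  OPAL_CONFIG_IDLE_UNDO);
	}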
@@ -206,16 +219,105 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);		\
 _GLOBAL(power7_wakeup_tb_loss)
 	ld	r2,PACATOC(r13);
 	ld	r1,PACAR1(r13)
+	/*
+	 * Before entering any idle state, the NVGPRs are saved on the stack
+	 * and they are restored before switching to the process context. Hence
+	 * until they are restored, they are free to be used.
+	 *
+	 * Save SRR1 in an NVGPR as it might be clobbered in opal_call_realmode
+	 * (called in CHECK_HMI_INTERRUPT). SRR1 is required to determine the
+	 * wakeup reason if we branch to kvm_start_guest.
+	 */
 
+	mfspr	r16,SPRN_SRR1
 BEGIN_FTR_SECTION
 	CHECK_HMI_INTERRUPT
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+
+	lbz	r7,PACA_THREAD_MASK(r13)
+	ld	r14,PACA_CORE_IDLE_STATE_PTR(r13)
+lwarx_loop2:
+	lwarx	r15,0,r14
+	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
+	/*
+	 * The lock bit is set in one of two cases:
+	 * a. In the sleep/winkle enter path, the last thread is executing
+	 * the fastsleep workaround code.
+	 * b. In the wake-up path, another thread is executing the fastsleep
+	 * workaround undo code, resyncing the timebase or restoring context.
+	 * In either case, loop until the lock bit is cleared.
+	 */
+	bne	core_idle_lock_held
+
+	cmpwi	cr2,r15,0
+	or	r15,r15,r7		/* Set thread bit */
+
+	beq	cr2,first_thread
+
+	/* Not first thread in core to wake up */
+	stwcx.	r15,0,r14
+	bne-	lwarx_loop2
+	isync
+	b	common_exit
+
+core_idle_lock_held:
+	HMT_LOW
+core_idle_lock_loop:
+	lwz	r15,0(r14)
+	andi.	r9,r15,PNV_CORE_IDLE_LOCK_BIT
+	bne	core_idle_lock_loop
+	HMT_MEDIUM
+	b	lwarx_loop2
+
+first_thread:
+	/* First thread in core to wake up */
+	ori	r15,r15,PNV_CORE_IDLE_LOCK_BIT
+	stwcx.	r15,0,r14
+	bne-	lwarx_loop2
+	isync
+
+	/*
+	 * First thread in the core waking up from fastsleep. It needs to
+	 * call the fastsleep workaround code if the platform requires it.
+	 * Call it unconditionally here. The branch below will be patched
+	 * out when the idle states are discovered if the platform does not
+	 * require the workaround.
+	 */
+.global pnv_fastsleep_workaround_at_exit
+pnv_fastsleep_workaround_at_exit:
+	b	fastsleep_workaround_at_exit
+
+timebase_resync:
+	/* Do timebase resync if we are waking up from sleep. Use the cr3
+	 * value set in exceptions-64s.S */
+	ble	cr3,clear_lock
 	/* Time base re-sync */
-	li	r3,OPAL_RESYNC_TIMEBASE
+	li	r0,OPAL_RESYNC_TIMEBASE
 	bl	opal_call_realmode;
-
 	/* TODO: Check r3 for failure */
 
+clear_lock:
+	andi.	r15,r15,PNV_CORE_IDLE_THREAD_BITS
+	lwsync
+	stw	r15,0(r14)
+
+common_exit:
+	li	r5,PNV_THREAD_RUNNING
+	stb	r5,PACA_THREAD_IDLE_STATE(r13)
+
+	mtspr	SPRN_SRR1,r16
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	li	r0,KVM_HWTHREAD_IN_KERNEL
+	stb	r0,HSTATE_HWTHREAD_STATE(r13)
+	/* Order setting hwthread_state vs. testing hwthread_req */
+	sync
+	lbz	r0,HSTATE_HWTHREAD_REQ(r13)
+	cmpwi	r0,0
+	beq	6f
+	b	kvm_start_guest
+6:
+#endif
+
 	REST_NVGPRS(r1)
 	REST_GPR(2, r1)
 	ld	r3,_CCR(r1)
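The sync in the common_exit KVM block pairs a store to hwthread_state with a load of hwthread_req, the standard store-buffering pattern; without the full barrier, this thread and KVM could each miss the other's update. A C sketch of the same handoff, with hypothetical variables standing in for the HSTATE fields:

	#include <stdint.h>

	#define KVM_HWTHREAD_IN_KERNEL	0	/* assumed value, for illustration */

	/* Hypothetical stand-ins for the per-thread HSTATE fields. */
	extern uint8_t hwthread_state;		/* HSTATE_HWTHREAD_STATE(r13) */
	extern uint8_t hwthread_req;		/* HSTATE_HWTHREAD_REQ(r13)   */
	extern void kvm_start_guest(void);

	static void common_exit_handoff(void)
	{
		__atomic_store_n(&hwthread_state, KVM_HWTHREAD_IN_KERNEL,
				 __ATOMIC_RELAXED);
		/*
		 * Full barrier, like the sync above: publish our state before
		 * sampling KVM's request, so either KVM sees this thread back
		 * in the kernel, or this thread sees KVM's claim -- the
		 * handoff can never be missed by both sides.
		 */
		__atomic_thread_fence(__ATOMIC_SEQ_CST);
		if (__atomic_load_n(&hwthread_req, __ATOMIC_RELAXED))
			kvm_start_guest();	/* does not return here */
	}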
@@ -228,6 +330,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	mtspr	SPRN_SRR0,r5
 	rfid
 
+fastsleep_workaround_at_exit:
+	li	r3,1
+	li	r4,0
+	li	r0,OPAL_CONFIG_CPU_IDLE_STATE
+	bl	opal_call_realmode
+	b	timebase_resync
+
 /*
  * R3 here contains the value that will be returned to the caller
  * of power7_nap.
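With fastsleep_workaround_at_exit in place, the whole wake-up protocol can be summarized. A hypothetical C rendering under the same assumptions as the earlier sketches: spin at low priority while the lock is held, the first thread out takes the lock, undoes the workaround (r3 = 1, r4 = 0), resyncs the timebase if waking from sleep (the cr3 test), then stores back just the thread bits to drop the lock:

	#include <stdint.h>

	#define PNV_CORE_IDLE_LOCK_BIT		0x100	/* assumed, as above */
	#define PNV_CORE_IDLE_THREAD_BITS	0x0FF

	/* Stand-ins for the real-mode OPAL calls made in this file. */
	extern int64_t opal_config_cpu_idle_state(uint64_t state, uint64_t flag);
	extern int64_t opal_resync_timebase(void);

	static void pnv_wakeup(uint32_t *core_idle_state, uint32_t thread_mask,
			       int waking_from_sleep)
	{
		uint32_t old, new;

		for (;;) {
			old = __atomic_load_n(core_idle_state, __ATOMIC_RELAXED);
			if (old & PNV_CORE_IDLE_LOCK_BIT)
				continue;	/* core_idle_lock_loop, at HMT_LOW */

			new = old | thread_mask;	/* set our thread bit */
			if (old == 0)			/* cr2: first thread to wake */
				new |= PNV_CORE_IDLE_LOCK_BIT;
			if (__atomic_compare_exchange_n(core_idle_state, &old, new,
							0, __ATOMIC_ACQUIRE,
							__ATOMIC_RELAXED))
				break;
		}

		if (new & PNV_CORE_IDLE_LOCK_BIT) {
			/* Undo the fastsleep workaround (r3 = 1, r4 = 0). */
			opal_config_cpu_idle_state(1, 0);
			if (waking_from_sleep)		/* the cr3 test in the asm */
				opal_resync_timebase();
			/* clear_lock: store back only the thread bits. */
			__atomic_store_n(core_idle_state,
					 new & PNV_CORE_IDLE_THREAD_BITS,
					 __ATOMIC_RELEASE);
		}
		/* common_exit: mark this thread PNV_THREAD_RUNNING and return. */
	}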