powerpc/e6500: Make TLB lock recursive

Once special level interrupts are supported, we may take nested TLB
misses -- so allow the same thread to acquire the lock recursively.

The lock will not be effective against the nested TLB miss handler
trying to write the same entry as the interrupted TLB miss handler, but
that's also a problem on non-threaded CPUs that lack TLB write
conditional.  This will be addressed in the patch that enables crit/mc
support by invalidating the TLB on return from level exceptions.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Scott Wood · 11 years ago · commit 82d86de25b
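
For readers skimming the diffs below: the lock word is no longer a plain 0/1 flag but holds an owner id, so a nested miss handler on the same hardware thread can recognize its own lock and fall through instead of deadlocking. What follows is a minimal C11 sketch of that protocol, not the kernel code; the names are hypothetical, and the sketch stores cpu + 1 so that zero always reads as free, whereas the assembly stores the raw PACA index.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct tlb_core_data_sketch {
	_Atomic uint8_t lock;		/* 0 = free, otherwise an owner id */
};

/*
 * Returns true if this cpu already held the lock (a nested miss).
 * The caller must then skip the unlock, exactly as tlb_unlock_e6500
 * skips its store when cr1.eq is set.
 */
static bool tlb_lock_sketch(struct tlb_core_data_sketch *tcd, unsigned int cpu)
{
	uint8_t me = (uint8_t)(cpu + 1);	/* nonzero owner id */

	for (;;) {
		uint8_t owner = atomic_load(&tcd->lock);

		if (owner == me)
			return true;		/* recursive: already ours */
		if (owner != 0)
			continue;		/* a sibling holds it: spin */

		uint8_t expected = 0;		/* stands in for lbarx/stbcx. */
		if (atomic_compare_exchange_weak(&tcd->lock, &expected, me))
			return false;		/* freshly acquired */
	}
}

static void tlb_unlock_sketch(struct tlb_core_data_sketch *tcd, bool recursive)
{
	if (!recursive)			/* outermost holder releases */
		atomic_store(&tcd->lock, 0);
}

The real handler keeps the recursive flag in cr1.eq so it survives, without needing a spare register, from the lock path to tlb_unlock_e6500.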

+ 6 - 3
arch/powerpc/include/asm/mmu-book3e.h

@@ -287,11 +287,14 @@ extern int mmu_linear_psize;
 extern int mmu_vmemmap_psize;
 
 struct tlb_core_data {
+	/*
+	 * Per-core spinlock for e6500 TLB handlers (no tlbsrx.)
+	 * Must be the first struct element.
+	 */
+	u8 lock;
+
 	/* For software way selection, as on Freescale TLB1 */
 	u8 esel_next, esel_max, esel_first;
-
-	/* Per-core spinlock for e6500 TLB handlers (no tlbsrx.) */
-	u8 lock;
 };
 
 #ifdef CONFIG_PPC64
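
The "must be the first struct element" comment is load-bearing: the assembly hunk further down now uses r11, the tlb_per_core pointer, directly as the lock address (lbarx r15,0,r11) instead of computing r11+TCD_LOCK, so the field has to sit at offset 0. As a standalone illustration (hypothetical userspace code, not from the patch) of the invariant the next hunk enforces with BUILD_BUG_ON:

#include <assert.h>
#include <stddef.h>

typedef unsigned char u8;

struct tlb_core_data {
	u8 lock;	/* addressed as a zero-offset byte through r11 */
	u8 esel_next, esel_max, esel_first;
};

static_assert(offsetof(struct tlb_core_data, lock) == 0,
	      "TLB miss handlers address the lock at offset 0");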

+ 2 - 0
arch/powerpc/kernel/setup_64.c

@@ -102,6 +102,8 @@ static void setup_tlb_core_data(void)
 {
 	int cpu;
 
+	BUILD_BUG_ON(offsetof(struct tlb_core_data, lock) != 0);
+
 	for_each_possible_cpu(cpu) {
 		int first = cpu_first_thread_sibling(cpu);
 

+ 12 - 7
arch/powerpc/mm/tlb_low_64e.S

@@ -284,7 +284,7 @@ itlb_miss_fault_bolted:
  * r14 = page table base
  * r13 = PACA
  * r11 = tlb_per_core ptr
- * r10 = crap (free to use)
+ * r10 = cpu number
  */
 tlb_miss_common_e6500:
 	/*
@@ -293,15 +293,18 @@ tlb_miss_common_e6500:
 	 *
 	 * MAS6:IND should be already set based on MAS4
 	 */
-	addi	r10,r11,TCD_LOCK
-1:	lbarx	r15,0,r10
+1:	lbarx	r15,0,r11
+	lhz	r10,PACAPACAINDEX(r13)
 	cmpdi	r15,0
+	cmpdi	cr1,r15,1	/* set cr1.eq = 0 for non-recursive */
 	bne	2f
-	li	r15,1
-	stbcx.	r15,0,r10
+	stbcx.	r10,0,r11
 	bne	1b
+3:
 	.subsection 1
-2:	lbz	r15,0(r10)
+2:	cmpd	cr1,r15,r10	/* recursive lock due to mcheck/crit/etc? */
+	beq	cr1,3b		/* unlock will happen if cr1.eq = 0 */
+	lbz	r15,0(r11)
 	cmpdi	r15,0
 	bne	2b
 	b	1b
@@ -379,9 +382,11 @@ tlb_miss_common_e6500:
 
 tlb_miss_done_e6500:
 	.macro	tlb_unlock_e6500
+	beq	cr1,1f		/* no unlock if lock was recursively grabbed */
 	li	r15,0
 	isync
-	stb	r15,TCD_LOCK(r11)
+	stb	r15,0(r11)
+1:
 	.endm
 
 	tlb_unlock_e6500
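
To map the C sketch above onto the assembly: r15 holds the lock value observed by lbarx, r10 holds this thread's id loaded from PACAPACAINDEX(r13), and cr1.eq carries the recursion result. It is cleared by cmpdi cr1,r15,1 on the uncontended path, set by the cmpd in the contention path when the current holder is this cpu (which then branches past the acquire to 3:), and finally tested in tlb_unlock_e6500 to decide whether the releasing stb runs.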