tlb_low_64e.S
/*
 * Low level TLB miss handlers for Book3E
 *
 * Copyright (C) 2008-2009
 *     Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/pgtable.h>
#include <asm/exception-64e.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_booke_hv_asm.h>

#ifdef CONFIG_PPC_64K_PAGES
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE+1)
#else
#define VPTE_PMD_SHIFT	(PTE_INDEX_SIZE)
#endif
#define VPTE_PUD_SHIFT	(VPTE_PMD_SHIFT + PMD_INDEX_SIZE)
#define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
#define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
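
/*
 * A quick sanity check on the sizes above (informational, derived from
 * the definitions rather than from any header): VPTE_INDEX_SIZE is the
 * total number of index bits across all page table levels, so the
 * virtually-linear page table holds 2^VPTE_INDEX_SIZE entries and maps
 * 2^(VPTE_INDEX_SIZE + PAGE_SHIFT) bytes of address space.  The extra
 * "+1" in the 64K-page variant of VPTE_PMD_SHIFT accounts for the
 * 16-byte (rather than 8-byte) bottom-level entries used by that page
 * table format (see the 64K special case in normal_tlb_miss below).
 */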
/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with a bolted linear mapping          *
 * No virtual page table, no nested TLB misses                        *
 *                                                                    *
 **********************************************************************/

/*
 * Note that, unlike non-bolted handlers, TLB_EXFRAME is not
 * modified by the TLB miss handlers themselves, since the TLB miss
 * handler code will not itself cause a recursive TLB miss.
 *
 * TLB_EXFRAME will be modified when crit/mc/debug exceptions are
 * entered/exited.
 */
.macro tlb_prolog_bolted intnum addr
	mtspr	SPRN_SPRG_GEN_SCRATCH,r12
	mfspr	r12,SPRN_SPRG_TLB_EXFRAME
	std	r13,EX_TLB_R13(r12)
	std	r10,EX_TLB_R10(r12)
	mfspr	r13,SPRN_SPRG_PACA
	mfcr	r10
	std	r11,EX_TLB_R11(r12)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
	mfspr	r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
	DO_KVM	\intnum, SPRN_SRR1
	std	r16,EX_TLB_R16(r12)
	mfspr	r16,\addr		/* get faulting address */
	std	r14,EX_TLB_R14(r12)
	ld	r14,PACAPGD(r13)
	std	r15,EX_TLB_R15(r12)
	std	r10,EX_TLB_CR(r12)
	TLB_MISS_PROLOG_STATS
.endm

.macro tlb_epilog_bolted
	ld	r14,EX_TLB_CR(r12)
	ld	r10,EX_TLB_R10(r12)
	ld	r11,EX_TLB_R11(r12)
	ld	r13,EX_TLB_R13(r12)
	mtcr	r14
	ld	r14,EX_TLB_R14(r12)
	ld	r15,EX_TLB_R15(r12)
	TLB_MISS_RESTORE_STATS
	ld	r16,EX_TLB_R16(r12)
	mfspr	r12,SPRN_SPRG_GEN_SCRATCH
.endm

/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_bolted)
	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR

	/* We need _PAGE_PRESENT and _PAGE_ACCESSED set */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST in _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
	 *
	 * MAS1 is preset for all we need except for TID that needs to
	 * be cleared for kernel translations
	 */
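	/*
	 * Worked out explicitly (informational only, restating the
	 * constants quoted above): ESR_ST = 0x00800000 is bit 23,
	 * _PAGE_BAP_SW = 0x00000010 is bit 4, and 23 - 4 = 19, hence the
	 * ">> 19" above.  Likewise _PAGE_DIRTY = 0x00001000 is bit 12,
	 * and 23 - 12 = 11, hence the ">> 11".
	 */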
	mfspr	r11,SPRN_ESR

	srdi	r15,r16,60		/* get region */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	dtlb_miss_fault_bolted	/* Bail if fault addr is invalid */

	rlwinm	r10,r11,32-19,27,27
	rlwimi	r10,r11,32-16,19,19
	cmpwi	r15,0			/* user vs kernel check */
	ori	r10,r10,_PAGE_PRESENT
	oris	r11,r10,_PAGE_ACCESSED@h

	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_bolted

tlb_miss_common_bolted:
/*
 * This is the guts of the TLB miss handler for bolted-linear.
 * We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = page table base
 * r13 = PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
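	/*
	 * Note on the page table walk below (descriptive, not from the
	 * original source): each rldicl rotates the EA right by
	 * (LEVEL_SHIFT - 3) and keeps only the low INDEX_SIZE + 3 bits,
	 * which yields (index at that level) * 8, i.e. a byte offset
	 * into the table; the following clrrdi clears the 3 rotated-in
	 * low bits so ldx can use it directly.  The signed "cmpdi ...;
	 * bge" checks rely on valid next-level pointers being kernel
	 * virtual addresses with the top bit set, so NULL or hugepage
	 * directory entries compare as >= 0 and we bail.
	 */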
	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
	cmpldi	cr0,r14,0
	clrrdi	r15,r15,3
	beq	tlb_miss_fault_bolted	/* No PGDIR, bail */
BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,R16)
	ldx	r14,r14,r15		/* grab pgd entry */
	beq	tlb_miss_done_bolted	/* tlb exists already, bail */
MMU_FTR_SECTION_ELSE
	ldx	r14,r14,r15		/* grab pgd entry */
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

#ifndef CONFIG_PPC_64K_PAGES
	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted	/* Bad pgd entry or hugepage; bail */
	ldx	r14,r14,r15		/* grab pud entry */
#endif /* CONFIG_PPC_64K_PAGES */

	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab pmd entry */

	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_fault_bolted
	ldx	r14,r14,r15		/* Grab PTE, normal (!huge) page */

	/* Check if required permissions are met */
	andc.	r15,r11,r14
	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	bne-	tlb_miss_fault_bolted

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE need change if !base page size, not
	 *                 yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Needs to be done
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	clrldi	r15,r15,12		/* Clear crap at the top */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	mtspr	SPRN_MAS2,r11
	andi.	r11,r14,_PAGE_DIRTY
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
	mtspr	SPRN_MAS7_MAS3,r15
	tlbwe

tlb_miss_done_bolted:
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	tlb_epilog_bolted
	rfi

itlb_miss_kernel_bolted:
	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h
tlb_miss_kernel_bolted:
	mfspr	r10,SPRN_MAS1
	ld	r14,PACA_KERNELPGD(r13)
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	tlb_miss_common_bolted

tlb_miss_fault_bolted:
	/* We need to check if it was an instruction miss */
	andi.	r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
	bne	itlb_miss_fault_bolted
dtlb_miss_fault_bolted:
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_bolted:
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e

/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss_bolted)
	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0

	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	srdi	r15,r16,60		/* get region */
	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne-	itlb_miss_fault_bolted

	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r15,0		/* Check for user region */
	oris	r11,r11,_PAGE_ACCESSED@h

	beq	tlb_miss_common_bolted
	b	itlb_miss_kernel_bolted
#ifdef CONFIG_PPC_FSL_BOOK3E
/*
 * TLB miss handling for e6500 and derivatives, using hardware tablewalk.
 *
 * Linear mapping is bolted: no virtual page table or nested TLB misses
 * Indirect entries in TLB1, hardware loads resulting direct entries
 *    into TLB0
 * No HES or NV hint on TLB1, so we need to do software round-robin
 * No tlbsrx. so we need a spinlock, and we have to deal
 *    with MAS-damage caused by tlbsx
 * 4K pages only
 */

	START_EXCEPTION(instruction_tlb_miss_e6500)
	tlb_prolog_bolted BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR0

	ld	r11,PACA_TCD_PTR(r13)
	srdi.	r15,r16,60		/* get region */
	ori	r16,r16,1
	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_e6500	/* user/kernel test */

	b	tlb_miss_common_e6500

	START_EXCEPTION(data_tlb_miss_e6500)
	tlb_prolog_bolted BOOKE_INTERRUPT_DTLB_MISS SPRN_DEAR

	ld	r11,PACA_TCD_PTR(r13)
	srdi.	r15,r16,60		/* get region */
	rldicr	r16,r16,0,62
	TLB_MISS_STATS_SAVE_INFO_BOLTED
	bne	tlb_miss_kernel_e6500	/* user vs kernel check */

/*
 * This is the guts of the TLB miss handler for e6500 and derivatives.
 * We are entered with:
 *
 * r16 = page of faulting address (low bit 0 if data, 1 if instruction)
 * r15 = crap (free to use)
 * r14 = page table base
 * r13 = PACA
 * r11 = tlb_per_core ptr
 * r10 = crap (free to use)
 */
tlb_miss_common_e6500:
	crmove	cr2*4+2,cr0*4+2		/* cr2.eq != 0 if kernel address */

BEGIN_FTR_SECTION		/* CPU_FTR_SMT */
	/*
	 * Search if we already have an indirect entry for that virtual
	 * address, and if we do, bail out.
	 *
	 * MAS6:IND should be already set based on MAS4
	 */
1:	lbarx	r15,0,r11
	lhz	r10,PACAPACAINDEX(r13)
	cmpdi	r15,0
	cmpdi	cr1,r15,1	/* set cr1.eq = 0 for non-recursive */
	addi	r10,r10,1
	bne	2f
	stbcx.	r10,0,r11
	bne	1b
3:
	.subsection 1
2:	cmpd	cr1,r15,r10	/* recursive lock due to mcheck/crit/etc? */
	beq	cr1,3b		/* unlock will happen if cr1.eq = 0 */
	lbz	r15,0(r11)
	cmpdi	r15,0
	bne	2b
	b	1b
	.previous
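	/*
	 * Rough sketch of the locking above (a reading of the code, not
	 * an authoritative description): the per-core TCD lock byte is 0
	 * when free and (owner's paca index + 1) when held, roughly:
	 *
	 *	token = paca_index + 1;
	 *	for (;;) {
	 *		old = lbarx(&lock);
	 *		if (old == 0) {
	 *			if (stbcx(&lock, token))
	 *				break;		// acquired
	 *		} else if (old == token) {
	 *			break;			// we already hold it
	 *		}				// (crit/mcheck recursion)
	 *	}
	 *
	 * cr1.eq records the recursive case so tlb_unlock_e6500 can skip
	 * the release.
	 */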
	/*
	 * Erratum A-008139 says that we can't use tlbwe to change
	 * an indirect entry in any way (including replacing or
	 * invalidating) if the other thread could be in the process
	 * of a lookup.  The workaround is to invalidate the entry
	 * with tlbilx before overwriting.
	 */

	lbz	r15,TCD_ESEL_NEXT(r11)
	rlwinm	r10,r15,16,0xff0000
	oris	r10,r10,MAS0_TLBSEL(1)@h
	mtspr	SPRN_MAS0,r10
	isync
	tlbre
	mfspr	r15,SPRN_MAS1
	andis.	r15,r15,MAS1_VALID@h
	beq	5f

BEGIN_FTR_SECTION_NESTED(532)
	mfspr	r10,SPRN_MAS8
	rlwinm	r10,r10,0,0x80000fff	/* tgs,tlpid -> sgs,slpid */
	mtspr	SPRN_MAS5,r10
END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)

	mfspr	r10,SPRN_MAS1
	rlwinm	r15,r10,0,0x3fff0000	/* tid -> spid */
	rlwimi	r15,r10,20,0x00000003	/* ind,ts -> sind,sas */
	mfspr	r10,SPRN_MAS6
	mtspr	SPRN_MAS6,r15
	mfspr	r15,SPRN_MAS2
	isync
	tlbilxva 0,r15
	isync
	mtspr	SPRN_MAS6,r10

5:
BEGIN_FTR_SECTION_NESTED(532)
	li	r10,0
	mtspr	SPRN_MAS8,r10
	mtspr	SPRN_MAS5,r10
END_FTR_SECTION_NESTED(CPU_FTR_EMB_HV,CPU_FTR_EMB_HV,532)

	tlbsx	0,r16
	mfspr	r10,SPRN_MAS1
	andis.	r15,r10,MAS1_VALID@h
	bne	tlb_miss_done_e6500
FTR_SECTION_ELSE
	mfspr	r10,SPRN_MAS1
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SMT)

	oris	r10,r10,MAS1_VALID@h
	beq	cr2,4f
	rlwinm	r10,r10,0,16,1		/* Clear TID */
4:	mtspr	SPRN_MAS1,r10

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	tlb_miss_fault_e6500

	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
	cmpldi	cr0,r14,0
	clrrdi	r15,r15,3
	beq-	tlb_miss_fault_e6500	/* No PGDIR, bail */
	ldx	r14,r14,r15		/* grab pgd entry */

	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_huge_e6500	/* Bad pgd entry or hugepage; bail */
	ldx	r14,r14,r15		/* grab pud entry */

	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
	clrrdi	r15,r15,3
	cmpdi	cr0,r14,0
	bge	tlb_miss_huge_e6500
	ldx	r14,r14,r15		/* Grab pmd entry */

	mfspr	r10,SPRN_MAS0
	cmpdi	cr0,r14,0
	bge	tlb_miss_huge_e6500

	/* Now we build the MAS for a 2M indirect page:
	 *
	 * MAS 0   :	ESEL needs to be filled by software round-robin
	 * MAS 1   :	Fully set up
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is base ind page size always
	 *               - TID already cleared if necessary
	 * MAS 2   :	Default not 2M-aligned, need to be redone
	 * MAS 3+7 :	Needs to be done
	 */
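	/*
	 * Informational note (derived from the sizes involved, not from
	 * the original source): with 4K base pages, one 2M indirect
	 * entry covers 2M / 4K = 512 PTEs of 8 bytes each, i.e. exactly
	 * the 4K page table page the PMD entry points at, which is why
	 * the EPN below is simply the EA rounded down to 2M and MAS3/7
	 * get the PMD value with SPSIZE set to 4K.
	 */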
	ori	r14,r14,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
	mtspr	SPRN_MAS7_MAS3,r14
	clrrdi	r15,r16,21		/* make EA 2M-aligned */
	mtspr	SPRN_MAS2,r15

tlb_miss_huge_done_e6500:
	lbz	r15,TCD_ESEL_NEXT(r11)
	lbz	r16,TCD_ESEL_MAX(r11)
	lbz	r14,TCD_ESEL_FIRST(r11)
	rlwimi	r10,r15,16,0x00ff0000	/* insert esel_next into MAS0 */
	addi	r15,r15,1		/* increment esel_next */
	mtspr	SPRN_MAS0,r10
	cmpw	r15,r16
	iseleq	r15,r14,r15		/* if next == last use first */
	stb	r15,TCD_ESEL_NEXT(r11)

	tlbwe

tlb_miss_done_e6500:
	.macro	tlb_unlock_e6500
BEGIN_FTR_SECTION
	beq	cr1,1f		/* no unlock if lock was recursively grabbed */
	li	r15,0
	isync
	stb	r15,0(r11)
1:
END_FTR_SECTION_IFSET(CPU_FTR_SMT)
	.endm

	tlb_unlock_e6500
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	tlb_epilog_bolted
	rfi

tlb_miss_huge_e6500:
	beq	tlb_miss_fault_e6500
	li	r10,1
	andi.	r15,r14,HUGEPD_SHIFT_MASK@l /* r15 = psize */
	rldimi	r14,r10,63,0		/* Set PD_HUGE */
	xor	r14,r14,r15		/* Clear size bits */
	ldx	r14,0,r14

	/*
	 * Now we build the MAS for a huge page.
	 *
	 * MAS 0   :	ESEL needs to be filled by software round-robin
	 *		 - can be handled by indirect code
	 * MAS 1   :	Need to clear IND and set TSIZE
	 * MAS 2,3+7:	Needs to be redone similar to non-tablewalk handler
	 */
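	/*
	 * Note on the conversion below (an assumption about the MAS1
	 * TSIZE encoding on these parts, stated here for clarity rather
	 * than taken from the original source): r15 holds the page shift
	 * (log2 of the page size in bytes) and TSIZE encodes log2 of the
	 * size in KB, so subtracting 10 converts one to the other, e.g.
	 * a 2M page has shift 21 and tsize 11.
	 */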
	subi	r15,r15,10		/* Convert psize to tsize */
	mfspr	r10,SPRN_MAS1
	rlwinm	r10,r10,0,~MAS1_IND
	rlwimi	r10,r15,MAS1_TSIZE_SHIFT,MAS1_TSIZE_MASK
	mtspr	SPRN_MAS1,r10

	li	r10,-0x400
	sld	r15,r10,r15		/* Generate mask based on size */
	and	r10,r16,r15
	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	rlwimi	r10,r14,32-19,27,31	/* Insert WIMGE */
	clrldi	r15,r15,PAGE_SHIFT	/* Clear crap at the top */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	mtspr	SPRN_MAS2,r10
	andi.	r10,r14,_PAGE_DIRTY
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	bne	1f
	li	r10,MAS3_SW|MAS3_UW
	andc	r15,r15,r10
1:
	mtspr	SPRN_MAS7_MAS3,r15

	mfspr	r10,SPRN_MAS0
	b	tlb_miss_huge_done_e6500

tlb_miss_kernel_e6500:
	ld	r14,PACA_KERNELPGD(r13)
	cmpldi	cr1,r15,8		/* Check for vmalloc region */
	beq+	cr1,tlb_miss_common_e6500

tlb_miss_fault_e6500:
	tlb_unlock_e6500
	/* We need to check if it was an instruction miss */
	andi.	r16,r16,1
	bne	itlb_miss_fault_e6500
dtlb_miss_fault_e6500:
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_data_storage_book3e
itlb_miss_fault_e6500:
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	tlb_epilog_bolted
	b	exc_instruction_storage_book3e
#endif /* CONFIG_PPC_FSL_BOOK3E */
/**********************************************************************
 *                                                                    *
 * TLB miss handling for Book3E with TLB reservation and HES support  *
 *                                                                    *
 **********************************************************************/

/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in the normal
	 * fault case since that's the only interesting value here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* The page tables are mapped virtually linear. At this point, though,
	 * we don't know whether we are trying to fault in a first level
	 * virtual address or a virtual page table address. We can get that
	 * from bit 0x1 of the region ID which we have set for a page table
	 */
	andi.	r10,r15,0x1
	bne-	virt_page_table_tlb_miss

	std	r14,EX_TLB_ESR(r12);	/* save ESR */
	std	r16,EX_TLB_DEAR(r12);	/* save DEAR */

	/* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
	li	r11,_PAGE_PRESENT
	oris	r11,r11,_PAGE_ACCESSED@h

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r15,0		/* Check for user region */

	/* We pre-test some combination of permissions to avoid double
	 * faults:
	 *
	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
	 * ESR_ST is 0x00800000
	 * _PAGE_BAP_SW is 0x00000010
	 * So the shift is >> 19. This tests for supervisor writeability.
	 * If the page happens to be supervisor writeable and not user
	 * writeable, we will take a new fault later, but that should be
	 * a rare enough case.
	 *
	 * We also move ESR_ST in _PAGE_DIRTY position
	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
	 *
	 * MAS1 is preset for all we need except for TID that needs to
	 * be cleared for kernel translations
	 */
	rlwimi	r11,r14,32-19,27,27
	rlwimi	r11,r14,32-16,19,19

	beq	normal_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e

/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by writing a crazy value in ESR in our exception frame
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the non-linear
	 * mapping case since we know the linear mapping case will
	 * not re-enter. We could indeed optimize and also not save SRR0/1
	 * in the linear mapping case but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r15,r16,60		/* get region */
	cmpldi	cr0,r15,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h

	cmpldi	cr0,r15,0			/* Check for user region */
	std	r14,EX_TLB_ESR(r12)		/* write crazy -1 to frame */
	beq	normal_tlb_miss

	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
	oris	r11,r11,_PAGE_ACCESSED@h

	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r15,8			/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
	beq+	normal_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

/*
 * This is the guts of the first-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = region ID
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = PTE permission mask
 * r10 = crap (free to use)
 */
normal_tlb_miss:
	/* So we first construct the page table address. We do that by
	 * shifting the bottom of the address (not the region ID) by
	 * PAGE_SHIFT-3, clearing the bottom 3 bits (get a PTE ptr) and
	 * or'ing the fourth high bit.
	 *
	 * NOTE: For 64K pages, we do things slightly differently in
	 * order to handle the weird page table format used by Linux
	 */
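	/*
	 * Put differently (informational, for the 4K-page layout): the
	 * code below computes roughly
	 *
	 *	vpte = ((region | 1) << 60) |
	 *	       ((ea_without_region_bits >> PAGE_SHIFT) << 3)
	 *
	 * where "ea_without_region_bits" is just shorthand for the EA
	 * with its top 4 bits cleared, i.e. the address of this EA's PTE
	 * within the virtually-linear page table that lives in the
	 * odd-numbered companion region.  The 64K-page variant differs
	 * only in using a 16-byte stride for the upper index bits, as
	 * described above.
	 */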
	ori	r10,r15,0x1
#ifdef CONFIG_PPC_64K_PAGES
	/* For the top bits, 16 bytes per PTE */
	rldicl	r14,r16,64-(PAGE_SHIFT-4),PAGE_SHIFT-4+4
	/* Now create the bottom bits as 0 in position 0x8000 and
	 * the rest calculated for 8 bytes per PTE
	 */
	rldicl	r15,r16,64-(PAGE_SHIFT-3),64-15
	/* Insert the bottom bits in */
	rlwimi	r14,r15,0,16,31
#else
	rldicl	r14,r16,64-(PAGE_SHIFT-3),PAGE_SHIFT-3+4
#endif
	sldi	r15,r10,60
	clrrdi	r14,r14,3
	or	r10,r15,r14

BEGIN_MMU_FTR_SECTION
	/* Set the TLB reservation and search for existing entry. Then load
	 * the entry.
	 */
	PPC_TLBSRX_DOT(0,R16)
	ld	r14,0(r10)
	beq	normal_tlb_miss_done
MMU_FTR_SECTION_ELSE
	ld	r14,0(r10)
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)

finish_normal_tlb_miss:
	/* Check if required permissions are met */
	andc.	r15,r11,r14
	bne-	normal_tlb_miss_access_fault

	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE need change if !base page size, not
	 *                 yet implemented for now
	 * MAS 2   :	Defaults not useful, need to be redone
	 * MAS 3+7 :	Needs to be done
	 *
	 * TODO: mix up code below for better scheduling
	 */
	clrrdi	r11,r16,12		/* Clear low crap in EA */
	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
	mtspr	SPRN_MAS2,r11

	/* Check page size, if not standard, update MAS1 */
	rldicl	r11,r14,64-8,64-8
#ifdef CONFIG_PPC_64K_PAGES
	cmpldi	cr0,r11,BOOK3E_PAGESZ_64K
#else
	cmpldi	cr0,r11,BOOK3E_PAGESZ_4K
#endif
	beq-	1f
	mfspr	r11,SPRN_MAS1
	rlwimi	r11,r14,31,21,24
	rlwinm	r11,r11,0,21,19
	mtspr	SPRN_MAS1,r11
1:
	/* Move RPN in position */
	rldicr	r11,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
	clrldi	r15,r11,12		/* Clear crap at the top */
	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */

	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
	andi.	r11,r14,_PAGE_DIRTY
	bne	1f
	li	r11,MAS3_SW|MAS3_UW
	andc	r15,r15,r11
1:
BEGIN_MMU_FTR_SECTION
	srdi	r16,r15,32
	mtspr	SPRN_MAS3,r15
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r15
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

normal_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
	TLB_MISS_EPILOG_SUCCESS
	rfi

normal_tlb_miss_access_fault:
	/* We need to check if it was an instruction miss */
	andi.	r10,r11,_PAGE_EXEC
	bne	1f
	ld	r14,EX_TLB_DEAR(r12)
	ld	r15,EX_TLB_ESR(r12)
	mtspr	SPRN_DEAR,r14
	mtspr	SPRN_ESR,r15
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
/*
 * This is the guts of the second-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = virtual page table faulting address
 * r15 = region (top 4 bits of address)
 * r14 = crap (free to use)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * Note that this should only ever be called as a second level handler
 * with the current scheme when using SW load.
 * That means we can always get the original fault DEAR at
 * EX_TLB_DEAR-EX_TLB_SIZE(r12)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will restart the whole fault at level
 * 0 so we don't care too much about clobbers
 *
 * XXX That code was written back when we couldn't clobber r14. We can now,
 * so we could probably optimize things a bit
 */
virt_page_table_tlb_miss:
	/* Are we hitting a kernel page table ? */
	andi.	r10,r15,0x8

	/* The cool thing now is that r10 contains 0 for user and 8 for kernel,
	 * and we happen to have the swapper_pg_dir at offset 8 from the user
	 * pgdir in the PACA :-).
	 */
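	/*
	 * In other words (a restatement, not from the original source):
	 * the add below biases the PACA pointer by 0 or 8 so that the
	 * single "ld r15,PACAPGD(r11)" further down transparently picks
	 * up either the user pgdir or the kernel pgdir.
	 */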
	add	r11,r10,r13
	/* If kernel, we need to clear MAS1 TID */
	beq	1f
	/* XXX replace the RMW cycles with immediate loads + writes */
	mfspr	r10,SPRN_MAS1
	rlwinm	r10,r10,0,16,1			/* Clear TID */
	mtspr	SPRN_MAS1,r10
1:
BEGIN_MMU_FTR_SECTION
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 */
	PPC_TLBSRX_DOT(0,R16)
	beq	virt_page_table_tlb_miss_done
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-(VPTE_INDEX_SIZE+3),VPTE_INDEX_SIZE+3+4
	bne-	virt_page_table_tlb_miss_fault

	/* Get the PGD pointer */
	ld	r15,PACAPGD(r11)
	cmpldi	cr0,r15,0
	beq-	virt_page_table_tlb_miss_fault

	/* Get to PGD entry */
	rldicl	r11,r16,64-VPTE_PGD_SHIFT,64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

#ifndef CONFIG_PPC_64K_PAGES
	/* Get to PUD entry */
	rldicl	r11,r16,64-VPTE_PUD_SHIFT,64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get to PMD entry */
	rldicl	r11,r16,64-VPTE_PMD_SHIFT,64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	virt_page_table_tlb_miss_fault

	/* Ok, we're all right, we can now create a kernel translation for
	 * a 4K or 64K page from r16 -> r15.
	 */
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is base page size always
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Needs to be done
	 *
	 * So we only do MAS 2 and 3 for now...
	 */
	clrldi	r11,r15,4		/* remove region ID from RPN */
	ori	r10,r11,1		/* Or-in SR */

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

BEGIN_MMU_FTR_SECTION
virt_page_table_tlb_miss_done:

	/* We have overridden MAS2:EPN but currently our primary TLB miss
	 * handler will always restore it so that should not be an issue,
	 * if we ever optimize the primary handler to not write MAS2 on
	 * some cases, we'll have to restore MAS2:EPN here based on the
	 * original fault's DEAR. If we do that we have to modify the
	 * ITLB miss handler to also store SRR0 in the exception frame
	 * as DEAR.
	 *
	 * However, one nasty thing we did is we cleared the reservation
	 * (well, potentially we did). We do a trick here: if we are not
	 * a level 0 exception (i.e. we interrupted the TLB miss) we
	 * offset the return address by -4 in order to replay the tlbsrx
	 * instruction there
	 */
	subf	r10,r13,r12
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
	bne-	1f
	ld	r11,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
	addi	r10,r11,-4
	std	r10,PACA_EXTLB+EX_TLB_SIZE+EX_TLB_SRR0(r13)
1:
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)

	/* Return to caller, normal case */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK);
	TLB_MISS_EPILOG_SUCCESS
	rfi

virt_page_table_tlb_miss_fault:
	/* If we fault here, things are a little bit tricky. We need to call
	 * either data or instruction store fault, and we need to retrieve
	 * the original fault address and ESR (for data).
	 *
	 * The thing is, we know that in normal circumstances, this is
	 * always called as a second level tlb miss for SW load or as a first
	 * level TLB miss for HW load, so we should be able to peek at the
	 * relevant information in the first exception frame in the PACA.
	 *
	 * However, we do need to double check that, because we may just hit
	 * a stray kernel pointer or a userland attack trying to hit those
	 * areas. If that is the case, we do a data fault. (We can't get here
	 * from an instruction tlb miss anyway).
	 *
	 * Note also that when going to a fault, we must unwind the previous
	 * level as well. Since we are doing that, we don't need to clear or
	 * restore the TLB reservation either.
	 */
	subf	r10,r13,r12
	cmpldi	cr0,r10,PACA_EXTLB+EX_TLB_SIZE
	bne-	virt_page_table_tlb_miss_whacko_fault

	/* We dig the original DEAR and ESR from slot 0 */
	ld	r15,EX_TLB_DEAR+PACA_EXTLB(r13)
	ld	r16,EX_TLB_ESR+PACA_EXTLB(r13)

	/* We check for the "special" ESR value for instruction faults */
	cmpdi	cr0,r16,-1
	beq	1f
	mtspr	SPRN_DEAR,r15
	mtspr	SPRN_ESR,r16
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

virt_page_table_tlb_miss_whacko_fault:
	/* The linear fault will restart everything so ESR and DEAR will
	 * not have been clobbered, let's just fault with what we have
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_FAULT);
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
/**************************************************************
 *                                                            *
 * TLB miss handling for Book3E with hw page table support    *
 *                                                            *
 **************************************************************/

/* Data TLB miss */
	START_EXCEPTION(data_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* Now we handle the fault proper. We only save DEAR in the normal
	 * fault case since that's the only interesting value here.
	 * We could probably also optimize by not saving SRR0/1 in the
	 * linear mapping case but I'll leave that for later
	 */
	mfspr	r14,SPRN_ESR
	mfspr	r16,SPRN_DEAR		/* get faulting address */
	srdi	r11,r16,60		/* get region */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0		/* Check for user region */
	ld	r15,PACAPGD(r13)	/* Load user pgdir */
	beq	htw_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault with whatever DEAR and ESR
	 * are here
	 */
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e

/* Instruction TLB miss */
	START_EXCEPTION(instruction_tlb_miss_htw)
	TLB_MISS_PROLOG

	/* If we take a recursive fault, the second level handler may need
	 * to know whether we are handling a data or instruction fault in
	 * order to get to the right store fault handler. We provide that
	 * info by keeping a crazy value for ESR in r14
	 */
	li	r14,-1	/* store to exception frame is done later */

	/* Now we handle the fault proper. We only save DEAR in the non-linear
	 * mapping case since we know the linear mapping case will
	 * not re-enter. We could indeed optimize and also not save SRR0/1
	 * in the linear mapping case but I'll leave that for later
	 *
	 * Faulting address is SRR0 which is already in r16
	 */
	srdi	r11,r16,60		/* get region */
	cmpldi	cr0,r11,0xc		/* linear mapping ? */
	TLB_MISS_STATS_SAVE_INFO
	beq	tlb_load_linear		/* yes -> go to linear map load */

	/* We do the user/kernel test for the PID here along with the RW test
	 */
	cmpldi	cr0,r11,0		/* Check for user region */
	ld	r15,PACAPGD(r13)	/* Load user pgdir */
	beq	htw_tlb_miss

	/* XXX replace the RMW cycles with immediate loads + writes */
1:	mfspr	r10,SPRN_MAS1
	cmpldi	cr0,r11,8		/* Check for vmalloc region */
	rlwinm	r10,r10,0,16,1		/* Clear TID */
	mtspr	SPRN_MAS1,r10
	ld	r15,PACA_KERNELPGD(r13)	/* Load kernel pgdir */
	beq+	htw_tlb_miss

	/* We got a crappy address, just fault */
	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e

/*
 * This is the guts of the second-level TLB miss handler for direct
 * misses. We are entered with:
 *
 * r16 = virtual page table faulting address
 * r15 = PGD pointer
 * r14 = ESR
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * It can be re-entered by the linear mapping miss handler. However, to
 * avoid too much complication, it will save/restore things for us
 */
htw_tlb_miss:
	/* Search if we already have a TLB entry for that virtual address, and
	 * if we do, bail out.
	 *
	 * MAS1:IND should be already set based on MAS4
	 */
	PPC_TLBSRX_DOT(0,R16)
	beq	htw_tlb_miss_done

	/* Now, we need to walk the page tables. First check if we are in
	 * range.
	 */
	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
	bne-	htw_tlb_miss_fault

	/* Get the PGD pointer */
	cmpldi	cr0,r15,0
	beq-	htw_tlb_miss_fault

	/* Get to PGD entry */
	rldicl	r11,r16,64-(PGDIR_SHIFT-3),64-PGD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault

#ifndef CONFIG_PPC_64K_PAGES
	/* Get to PUD entry */
	rldicl	r11,r16,64-(PUD_SHIFT-3),64-PUD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault
#endif /* CONFIG_PPC_64K_PAGES */

	/* Get to PMD entry */
	rldicl	r11,r16,64-(PMD_SHIFT-3),64-PMD_INDEX_SIZE-3
	clrrdi	r10,r11,3
	ldx	r15,r10,r15
	cmpdi	cr0,r15,0
	bge	htw_tlb_miss_fault

	/* Ok, we're all right, we can now create an indirect entry for
	 * a 1M or 256M page.
	 *
	 * The last trick is now that because we use "half" pages for
	 * the HTW (1M IND is 2K and 256M IND is 32K) we need to account
	 * for an added LSB bit to the RPN. For 64K pages, there is no
	 * problem as we already use 32K arrays (half PTE pages), but for
	 * 4K page we need to extract a bit from the virtual address and
	 * insert it into the "PA52" bit of the RPN.
	 */
#ifndef CONFIG_PPC_64K_PAGES
	rlwimi	r15,r16,32-9,20,20
#endif
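	/*
	 * Worked out for the 4K case above (informational): a 1M indirect
	 * entry covers 1M / 4K = 256 PTEs, i.e. 2K, which is only half of
	 * the 4K PTE page the PMD entry points at.  The rlwimi therefore
	 * copies EA bit 0x100000 (which 1M half of the 2M PMD range we
	 * are in) down to bit 0x800 of the pointer, selecting the right
	 * 2K half: a rotate right by 9 moves bit 20 to bit 11, and the
	 * mask keeps just that bit.
	 */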
	/* Now we build the MAS:
	 *
	 * MAS 0   :	Fully setup with defaults in MAS4 and TLBnCFG
	 * MAS 1   :	Almost fully setup
	 *               - PID already updated by caller if necessary
	 *               - TSIZE for now is base ind page size always
	 * MAS 2   :	Use defaults
	 * MAS 3+7 :	Needs to be done
	 */
#ifdef CONFIG_PPC_64K_PAGES
	ori	r10,r15,(BOOK3E_PAGESZ_64K << MAS3_SPSIZE_SHIFT)
#else
	ori	r10,r15,(BOOK3E_PAGESZ_4K << MAS3_SPSIZE_SHIFT)
#endif

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

htw_tlb_miss_done:
	/* We don't bother with restoring DEAR or ESR since we know we are
	 * level 0 and just going back to userland. They are only needed
	 * if you are going to take an access fault
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_PT_OK)
	TLB_MISS_EPILOG_SUCCESS
	rfi

htw_tlb_miss_fault:
	/* We need to check if it was an instruction miss. We can tell
	 * because in that case r14 would contain -1
	 */
	cmpdi	cr0,r14,-1
	beq	1f
	mtspr	SPRN_DEAR,r16
	mtspr	SPRN_ESR,r14
	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_PT_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_data_storage_book3e
1:	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_PT_FAULT)
	TLB_MISS_EPILOG_ERROR
	b	exc_instruction_storage_book3e
/*
 * This is the guts of "any" level TLB miss handler for kernel linear
 * mapping misses. We are entered with:
 *
 * r16 = faulting address
 * r15 = crap (free to use)
 * r14 = ESR (data) or -1 (instruction)
 * r13 = PACA
 * r12 = TLB exception frame in PACA
 * r11 = crap (free to use)
 * r10 = crap (free to use)
 *
 * In addition we know that we will not re-enter, so in theory, we could
 * use a simpler epilog not restoring SRR0/1 etc.. but we'll do that later.
 *
 * We also need to be careful about MAS registers here & TLB reservation,
 * as we know we'll have clobbered them if we interrupt the main TLB miss
 * handlers in which case we probably want to do a full restart at level
 * 0 rather than saving / restoring the MAS.
 *
 * Note: If we care about performance of that core, we can easily shuffle
 * a few things around
 */
tlb_load_linear:
	/* For now, we assume the linear mapping is contiguous and stops at
	 * linear_map_top. We also assume the size is a multiple of 1G, thus
	 * we only use 1G pages for now. That might have to be changed in a
	 * final implementation, especially when dealing with hypervisors
	 */
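	/*
	 * Reading of the code below (informational): linear_map_top is
	 * fetched through the TOC, converted to a virtual address with
	 * tovirt, and used as the upper bound for the faulting EA.  The
	 * 1G page size shows up as the clrrdi by 30 further down, since
	 * 2^30 bytes is 1G.
	 */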
	ld	r11,PACATOC(r13)
	ld	r11,linear_map_top@got(r11)
	ld	r10,0(r11)
	tovirt(10,10)
	cmpld	cr0,r16,r10
	bge	tlb_load_linear_fault

	/* MAS1 needs a whole new setup. */
	li	r15,(BOOK3E_PAGESZ_1GB<<MAS1_TSIZE_SHIFT)
	oris	r15,r15,MAS1_VALID@h	/* MAS1 needs V and TSIZE */
	mtspr	SPRN_MAS1,r15

	/* Already somebody there ? */
	PPC_TLBSRX_DOT(0,R16)
	beq	tlb_load_linear_done

	/* Now we build the remaining MAS. MAS0 and 2 should be fine
	 * with their defaults, which leaves us with MAS 3 and 7. The
	 * mapping is linear, so we just take the address, clear the
	 * region bits, and or in the permission bits which are currently
	 * hard wired
	 */
	clrrdi	r10,r16,30		/* 1G page index */
	clrldi	r10,r10,4		/* clear region bits */
	ori	r10,r10,MAS3_SR|MAS3_SW|MAS3_SX

BEGIN_MMU_FTR_SECTION
	srdi	r16,r10,32
	mtspr	SPRN_MAS3,r10
	mtspr	SPRN_MAS7,r16
MMU_FTR_SECTION_ELSE
	mtspr	SPRN_MAS7_MAS3,r10
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_PAIRED_MAS)

	tlbwe

tlb_load_linear_done:
	/* We use the "error" epilog for success as we do want to
	 * restore to the initial faulting context, whatever it was.
	 * We do that because we can't resume a fault within a TLB
	 * miss handler, due to MAS and TLB reservation being clobbered.
	 */
	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_LINEAR)
	TLB_MISS_EPILOG_ERROR
	rfi

tlb_load_linear_fault:
	/* We keep the DEAR and ESR around, this shouldn't have happened */
	cmpdi	cr0,r14,-1
	beq	1f
	TLB_MISS_EPILOG_ERROR_SPECIAL
	b	exc_data_storage_book3e
1:	TLB_MISS_EPILOG_ERROR_SPECIAL
	b	exc_instruction_storage_book3e

#ifdef CONFIG_BOOK3E_MMU_TLB_STATS
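/*
 * Descriptive comment added for clarity: this helper atomically
 * increments the 64-bit counter whose address is passed in r9, using a
 * standard larx/stcx. retry loop; r8 is used as a scratch register.
 */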
.tlb_stat_inc:
1:	ldarx	r8,0,r9
	addi	r8,r8,1
	stdcx.	r8,0,r9
	bne-	1b
	blr
#endif