slb_low.S

/*
 * Low-level SLB routines
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 *
 * Based on earlier C version:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 * Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/firmware.h>

/* void slb_allocate_realmode(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 */
_GLOBAL(slb_allocate_realmode)
	/*
	 * check for bad kernel/user address
	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
	 */
	rldicr.	r9,r3,4,(63 - PGTABLE_EADDR_SIZE - 4)
	bne-	8f
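	/*
	 * In C terms the check above is roughly the following sketch
	 * (REGION_MASK and PGTABLE_RANGE as defined by the hash MMU
	 * headers):
	 *
	 *	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
	 *		goto invalid_ea;	(label 8 below)
	 *
	 * The single rldicr. rotates the region bits out of the way and
	 * masks the remainder, setting cr0 for the bne-.
	 */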
	srdi	r9,r3,60		/* get region */
	srdi	r10,r3,SID_SHIFT	/* get esid */
	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */

	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
	blt	cr7,0f			/* user or kernel? */

	/* kernel address: proto-VSID = ESID */
	/* WARNING - MAGIC: we don't use the VSID 0xfffffffff, but
	 * this code will generate the proto-VSID 0xfffffffff for the
	 * top segment.  That's ok, the scramble below will translate
	 * it to VSID 0, which is reserved as a bad VSID - one which
	 * will never have any pages in it.  */

	/* Check if hitting the linear mapping or some other kernel space */
	bne	cr7,1f

	/* Linear mapping encoding bits, the "li" instruction below will
	 * be patched by the kernel at boot
	 */
.globl slb_miss_kernel_load_linear
slb_miss_kernel_load_linear:
	li	r11,0
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
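	/*
	 * In C, the addis/addi pair above amounts to the calculation
	 * already quoted in the comment (a sketch; MAX_USER_CONTEXT per
	 * the hash MMU headers):
	 *
	 *	context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1;
	 *
	 * so the linear mapping (region 0xc) takes MAX_USER_CONTEXT + 1
	 * and each higher kernel region takes the next context after it.
	 */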
BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

1:
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/* Check virtual memmap region. To be patched at kernel boot */
	cmpldi	cr0,r9,0xf
	bne	1f
.globl slb_miss_kernel_load_vmemmap
slb_miss_kernel_load_vmemmap:
	li	r11,0
	b	6f
1:
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

	/* vmalloc mapping gets the encoding from the PACA as the mapping
	 * can be demoted from 64K -> 4K dynamically on some machines
	 */
	clrldi	r11,r10,48
	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
	bgt	5f
	lhz	r11,PACAVMALLOCSLLP(r13)
	b	6f
5:
	/* IO mapping */
.globl slb_miss_kernel_load_io
slb_miss_kernel_load_io:
	li	r11,0
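	/*
	 * Roughly, the branch above implements this C sketch
	 * (VMALLOC_SIZE and the vmalloc_sllp PACA field as defined
	 * elsewhere in the kernel; io_sllp stands in for the patched
	 * li value):
	 *
	 *	if ((esid & 0xffff) <= (VMALLOC_SIZE >> 28) - 1)
	 *		flags = get_paca()->vmalloc_sllp;
	 *	else
	 *		flags = io_sllp;
	 */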
6:
	/*
	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
	 * r9 = region id.
	 */
	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l

BEGIN_FTR_SECTION
	b	slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load_1T

0:
	/* when using slices, we extract the psize off the slice bitmaps
	 * and then we need to get the sllp encoding off the mmu_psize_defs
	 * array.
	 *
	 * XXX This is a bit inefficient especially for the normal case,
	 * so we should try to implement a fast path for the standard page
	 * size using the old sllp value so we avoid the array.  We cannot
	 * really do dynamic patching unfortunately as processes might flip
	 * between 4k and 64k standard page size
	 */
#ifdef CONFIG_PPC_MM_SLICES
	/* r10 has the esid */
	cmpldi	r10,16
	/* below SLICE_LOW_TOP */
	blt	5f
	/*
	 * Handle hpsizes,
	 * r9 is get_paca()->context.high_slices_psize[index], r11 is mask_index
	 */
	srdi	r11,r10,(SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT + 1)	/* index */
	addi	r9,r11,PACAHIGHSLICEPSIZE
	lbzx	r9,r13,r9		/* r9 is hpsizes[r11] */
	/* r11 = (r10 >> (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)) & 0x1 */
	rldicl	r11,r10,(64 - (SLICE_HIGH_SHIFT - SLICE_LOW_SHIFT)),63
	b	6f

5:
	/*
	 * Handle lpsizes
	 * r9 is get_paca()->context.low_slices_psize, r11 is index
	 */
	ld	r9,PACALOWSLICESPSIZE(r13)
	mr	r11,r10
6:
	sldi	r11,r11,2		/* index * 4 */
	/* Extract the psize and multiply to get an array offset */
	srd	r9,r9,r11
	andi.	r9,r9,0xf
	mulli	r9,r9,MMUPSIZEDEFSIZE

	/* Now get to the array and obtain the sllp */
	ld	r11,PACATOC(r13)
	ld	r11,mmu_psize_defs@got(r11)
	add	r11,r11,r9
	ld	r11,MMUPSIZESLLP(r11)
	ori	r11,r11,SLB_VSID_USER
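	/*
	 * The slice lookup above corresponds approximately to this C
	 * sketch (field names follow mm_context_t and mmu_psize_defs):
	 *
	 *	if (esid < 16)		(below SLICE_LOW_TOP)
	 *		psize = (ctx->low_slices_psize >> (esid * 4)) & 0xf;
	 *	else
	 *		psize = (ctx->high_slices_psize[index]
	 *			 >> (mask_index * 4)) & 0xf;
	 *	vsid_flags = mmu_psize_defs[psize].sllp | SLB_VSID_USER;
	 */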
#else
	/* paca context sllp already contains the SLB_VSID_USER bits */
	lhz	r11,PACACONTEXTSLLP(r13)
#endif /* CONFIG_PPC_MM_SLICES */

	ld	r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION
	cmpldi	r10,0x1000
	bge	slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	b	slb_finish_load

8:	/* invalid EA */
	li	r10,0			/* BAD_VSID */
	li	r9,0			/* BAD_VSID */
	li	r11,SLB_VSID_USER	/* flags don't much matter */
	b	slb_finish_load

#ifdef __DISABLED__

/* void slb_allocate_user(unsigned long ea);
 *
 * Create an SLB entry for the given EA (user or kernel).
 * r3 = faulting address, r13 = PACA
 * r9, r10, r11 are clobbered by this function
 * No other registers are examined or changed.
 *
 * It is called with translation enabled in order to be able to walk the
 * page tables.  This is not currently used.
 */
_GLOBAL(slb_allocate_user)
	/* r3 = faulting address */
	srdi	r10,r3,28		/* get esid */
	crset	4*cr7+lt		/* set "user" flag for later */

	/* check if we fit in the range covered by the pagetables */
	srdi.	r9,r3,PGTABLE_EADDR_SIZE
	crnot	4*cr0+eq,4*cr0+eq
	beqlr

	/* now we need to get to the page tables in order to get the page
	 * size encoding from the PMD.  In the future, we'll be able to deal
	 * with 1T segments too by getting the encoding from the PGD instead
	 */
	ld	r9,PACAPGDIR(r13)
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,8,25,28
	ldx	r9,r9,r11		/* get pgd_t */
	cmpldi	cr0,r9,0
	beqlr
	rlwinm	r11,r10,3,17,28
	ldx	r9,r9,r11		/* get pmd_t */
	cmpldi	cr0,r9,0
	beqlr

	/* build vsid flags */
	andi.	r11,r9,SLB_VSID_LLP
	ori	r11,r11,SLB_VSID_USER

	/* get context to calculate proto-VSID */
	ld	r9,PACACONTEXTID(r13)
	/* fall through to slb_finish_load */

#endif /* __DISABLED__ */

/*
 * Finish loading of an SLB entry and return
 *
 * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
 */
slb_finish_load:
	rldimi	r10,r9,ESID_BITS,0
	ASM_VSID_SCRAMBLE(r10,r9,256M)
	/*
	 * bits above VSID_BITS_256M need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT,(64 - (SLB_VSID_SHIFT + VSID_BITS_256M))
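	/*
	 * ASM_VSID_SCRAMBLE computes, in effect (a sketch; the multiplier
	 * and modulus constants come from the hash MMU headers):
	 *
	 *	vsid = (proto_vsid * VSID_MULTIPLIER_256M) % VSID_MODULUS_256M;
	 *
	 * and the rldimi above then merges the low VSID_BITS_256M bits of
	 * the VSID into the flags word at SLB_VSID_SHIFT.
	 */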
	/* r3 = EA, r11 = VSID data */
	/*
	 * Find a slot, round robin.  Previously we tried to find a
	 * free slot first but that took too long.  Unfortunately we
	 * don't have any LRU information to help us choose a slot.
	 */
7:	ld	r10,PACASTABRR(r13)
	addi	r10,r10,1
	/* This gets soft patched on boot. */
.globl slb_compare_rr_to_size
slb_compare_rr_to_size:
	cmpldi	r10,0

	blt+	4f
	li	r10,SLB_NUM_BOLTED

4:
	std	r10,PACASTABRR(r13)
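	/*
	 * In effect, the round robin above is this C sketch (PACA field
	 * names as in the kernel; the patched cmpldi compares against
	 * the real SLB size):
	 *
	 *	entry = get_paca()->stab_rr + 1;
	 *	if (entry >= mmu_slb_size)
	 *		entry = SLB_NUM_BOLTED;	(never evict bolted slots)
	 *	get_paca()->stab_rr = entry;
	 */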
3:
	rldimi	r3,r10,0,36		/* r3 = EA[0:35] | entry */
	oris	r10,r3,SLB_ESID_V@h	/* r10 = r3 | SLB_ESID_V */

	/* r3 = ESID data, r11 = VSID data */

	/*
	 * No need for an isync before or after this slbmte.  The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 */
	slbmte	r11,r10

	/* we're done for kernel addresses */
	crclr	4*cr0+eq		/* set result to "success" */
	bgelr	cr7

	/* Update the slb cache */
	lhz	r3,PACASLBCACHEPTR(r13)	/* offset = paca->slb_cache_ptr */
	cmpldi	r3,SLB_CACHE_ENTRIES
	bge	1f

	/* still room in the slb cache */
	sldi	r11,r3,2		/* r11 = offset * sizeof(u32) */
	srdi	r10,r10,28		/* get the 36 bits of the ESID */
	add	r11,r11,r13		/* r11 = (u32 *)paca + offset */
	stw	r10,PACASLBCACHE(r11)	/* paca->slb_cache[offset] = esid */
	addi	r3,r3,1			/* offset++ */
	b	2f

1:	/* offset >= SLB_CACHE_ENTRIES */
	li	r3,SLB_CACHE_ENTRIES+1
2:
	sth	r3,PACASLBCACHEPTR(r13)	/* paca->slb_cache_ptr = offset */
	crclr	4*cr0+eq		/* set result to "success" */
	blr
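/*
 * The cache update above corresponds roughly to this C sketch (field
 * names follow the PACA layout):
 *
 *	if (paca->slb_cache_ptr < SLB_CACHE_ENTRIES)
 *		paca->slb_cache[paca->slb_cache_ptr++] = esid;
 *	else
 *		paca->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
 *
 * where the out-of-range ptr value tells the context switch code the
 * cache overflowed, so it must flush the whole SLB rather than just
 * the cached entries.
 */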
/*
 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
 *
 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
 */
slb_finish_load_1T:
	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
	rldimi	r10,r9,ESID_BITS_1T,0
	ASM_VSID_SCRAMBLE(r10,r9,1T)
	/*
	 * bits above VSID_BITS_1T need to be ignored from r10
	 * also combine VSID and flags
	 */
	rldimi	r11,r10,SLB_VSID_SHIFT_1T,(64 - (SLB_VSID_SHIFT_1T + VSID_BITS_1T))
	li	r10,MMU_SEGSIZE_1T
	rldimi	r11,r10,SLB_VSID_SSIZE_SHIFT,0	/* insert segment size */

	/* r3 = EA, r11 = VSID data */
	clrrdi	r3,r3,SID_SHIFT_1T	/* clear out non-ESID bits */
	b	7b