/* head-nommu.S */
  1. /*
  2. * linux/arch/arm/kernel/head-nommu.S
  3. *
  4. * Copyright (C) 1994-2002 Russell King
  5. * Copyright (C) 2003-2006 Hyok S. Choi
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * Common kernel startup code (non-paged MM)
  12. *
  13. */
  14. #include <linux/linkage.h>
  15. #include <linux/init.h>
  16. #include <linux/errno.h>
  17. #include <asm/assembler.h>
  18. #include <asm/ptrace.h>
  19. #include <asm/asm-offsets.h>
  20. #include <asm/memory.h>
  21. #include <asm/cp15.h>
  22. #include <asm/thread_info.h>
  23. #include <asm/v7m.h>
  24. #include <asm/mpu.h>
  25. #include <asm/page.h>
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr.
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * On exit this tail-calls the per-processor init function (from the
 * procinfo table) and then falls into __after_proc_init with
 * lr = __mmap_switched, so __after_proc_init's "ret lr" continues
 * kernel bring-up there.
 */
	__HEAD

#ifdef CONFIG_CPU_THUMBONLY
	.thumb
ENTRY(stext)
#else
	.arm
ENTRY(stext)
 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
 THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
#endif

	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
						@ and irqs disabled
#if defined(CONFIG_CPU_CP15)
	mrc	p15, 0, r9, c0, c0		@ get processor id
#elif defined(CONFIG_CPU_V7M)
	ldr	r9, =BASEADDR_V7M_SCB		@ no CP15 on V7M: read the CPUID
	ldr	r9, [r9, V7M_SCB_CPUID]		@ register from the SCB instead
#else
	ldr	r9, =CONFIG_PROCESSOR_ID	@ no way to probe: use configured ID
#endif
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'

#ifdef CONFIG_ARM_MPU
	bl	__setup_mpu			@ program initial MPU regions
#endif

	badr	lr, 1f				@ return (PIC) address
	ldr	r12, [r10, #PROCINFO_INITFUNC]	@ init function, stored as an
	add	r12, r12, r10			@ offset relative to procinfo
	ret	r12				@ call it; returns to 1: below
1:	ldr	lr, =__mmap_switched		@ __after_proc_init returns here
	b	__after_proc_init
ENDPROC(stext)
#ifdef CONFIG_SMP
	.text
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
#ifndef CONFIG_CPU_CP15
	ldr	r9, =CONFIG_PROCESSOR_ID	@ no CP15: use configured ID
#else
	mrc	p15, 0, r9, c0, c0		@ get processor id
#endif
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor?
	beq	__error_p			@ yes, error 'p'

	ldr	r7, __secondary_data		@ r7 = &secondary_data
#ifdef CONFIG_ARM_MPU
	bl	__secondary_setup_mpu		@ Initialize the MPU
#endif

	badr	lr, 1f				@ return (PIC) address
	ldr	r12, [r10, #PROCINFO_INITFUNC]	@ per-CPU init fn (procinfo-relative)
	add	r12, r12, r10
	ret	r12
1:	bl	__after_proc_init
	ldr	sp, [r7, #12]			@ set up the stack pointer
						@ NOTE(review): assumes stack is at
						@ offset 12 in struct secondary_data
						@ -- confirm against asm-offsets
	mov	fp, #0				@ zero fp: terminate backtraces
	b	secondary_start_kernel
ENDPROC(secondary_startup)

	/* PIC-loadable literal holding the address of secondary_data */
	.type	__secondary_data, %object
__secondary_data:
	.long	secondary_data
#endif /* CONFIG_SMP */
/*
 * Set the Control Register and Read the process ID.
 *
 * In:  r0 = SCTLR (CP15) / CCR (V7M) value returned by the per-CPU
 *           init function
 *      lr = address to return to once the control register is written
 * Clobbers r3, r12, flags.
 *
 * Flag invariant: when CONFIG_ARM_MPU is set, the Z flag at the
 * MPU-enable code below reflects "a supported PMSA was detected" --
 * either via the PMSAv7 teq (whose branch to 1: preserves Z=1) or via
 * the PMSAv8 teq.  All the ...eq instructions depend on this.
 */
	.text
__after_proc_init:
#ifdef CONFIG_ARM_MPU
M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)
M_CLASS(ldr	r3, [r12, 0x50])	@ 0x50: SCB mirror of ID_MMFR0 (used
					@ like the AR-class read below)
AR_CLASS(mrc	p15, 0, r3, c0, c1, 4)	@ Read ID_MMFR0
	and	r3, r3, #(MMFR0_PMSA)	@ PMSA field
	teq	r3, #(MMFR0_PMSAv7)	@ PMSA v7
	beq	1f			@ v7: MAIR does not exist, skip setup
	teq	r3, #(MMFR0_PMSAv8)	@ PMSA v8
	/*
	 * Memory region attributes for PMSAv8:
	 *
	 *   n = AttrIndx[2:0]
	 *                    n       MAIR
	 *   DEVICE_nGnRnE    000     00000000
	 *   NORMAL           001     11111111
	 */
	ldreq	r3, =PMSAv8_MAIR(0x00, PMSAv8_RGN_DEVICE_nGnRnE) | \
		     PMSAv8_MAIR(0xff, PMSAv8_RGN_NORMAL)
AR_CLASS(mcreq	p15, 0, r3, c10, c2, 0)	@ MAIR 0
M_CLASS(streq	r3, [r12, #PMSAv8_MAIR0])
	moveq	r3, #0			@ attribute indices 4-7 unused
AR_CLASS(mcreq	p15, 0, r3, c10, c2, 1)	@ MAIR 1
M_CLASS(streq	r3, [r12, #PMSAv8_MAIR1])
1:
#endif
#ifdef CONFIG_CPU_CP15
	/*
	 * CP15 system control register value returned in r0 from
	 * the CPU init function.
	 */
#ifdef CONFIG_ARM_MPU
	/* Z still set iff a PMSA unit was found above (see header) */
	biceq	r0, r0, #CR_BR		@ Disable the 'default mem-map'
	orreq	r0, r0, #CR_M		@ Set SCTRL.M (MPU on)
#endif
#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
	orr	r0, r0, #CR_A		@ pre-v6 must trap unaligned accesses
#else
	bic	r0, r0, #CR_A		@ v6+: hardware handles unaligned
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I
#endif
	mcr	p15, 0, r0, c1, c0, 0	@ write control reg
	isb				@ ensure the new SCTLR takes effect
#elif defined (CONFIG_CPU_V7M)
#ifdef CONFIG_ARM_MPU
	/* Z still set iff a PMSA unit was found above (see header) */
	ldreq	r3, [r12, MPU_CTRL]
	biceq	r3, #MPU_CTRL_PRIVDEFENA	@ no default map for privileged
	orreq	r3, #MPU_CTRL_ENABLE		@ turn the MPU on
	streq	r3, [r12, MPU_CTRL]
	isb
#endif
	/* For V7M systems we want to modify the CCR similarly to the SCTLR */
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_DC
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_BP
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #V7M_SCB_CCR_IC
#endif
	str	r0, [r12, V7M_SCB_CCR]
#endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
	ret	lr
ENDPROC(__after_proc_init)
	.ltorg
#ifdef CONFIG_ARM_MPU

#ifndef CONFIG_CPU_V7M
/* Set which MPU region should be programmed (\unused kept so both
 * variants share one call signature; callers pass the SCB base there) */
.macro	set_region_nr tmp, rgnr, unused
	mov	\tmp, \rgnr		@ Use static region numbers
	mcr	p15, 0, \tmp, c6, c2, 0	@ Write RGNR
.endm

/* Setup a single MPU region, either D or I side (D-side for unified).
 * mov/mcr do not touch flags, so callers' Z ("unified map") survives. */
.macro	setup_region bar, acr, sr, side = PMSAv7_DATA_SIDE, unused
	mcr	p15, 0, \bar, c6, c1, (0 + \side)	@ I/DRBAR
	mcr	p15, 0, \acr, c6, c1, (4 + \side)	@ I/DRACR
	mcr	p15, 0, \sr, c6, c1, (2 + \side)	@ I/DRSR
.endm
#else
/* V7M: region number goes through the memory-mapped RNR register;
 * \base is the SCB base address (r12 at every call site) */
.macro	set_region_nr tmp, rgnr, base
	mov	\tmp, \rgnr
	str	\tmp, [\base, #PMSAv7_RNR]
.endm

/* V7M: RASR packs the attributes word (<<16) and the size/enable word
 * into one register.  NOTE: clobbers \acr. */
.macro	setup_region bar, acr, sr, unused, base
	lsl	\acr, \acr, #16
	orr	\acr, \acr, \sr
	str	\bar, [\base, #PMSAv7_RBAR]
	str	\acr, [\base, #PMSAv7_RASR]
.endm
#endif
/*
 * Setup the MPU and initial MPU Regions. We create the following regions:
 * Region 0: Use this for probing the MPU details, so leave disabled.
 * Region 1: Background region - covers the whole of RAM as strongly ordered
 * Region 2: Normal, Shared, cacheable for RAM. From PHYS_OFFSET, size from r6
 * Region 3: Normal, shared, inaccessible from PL0 to protect the vectors page
 *
 * r6: Value to be written to DRSR (and IRSR if required) for PMSAv7_RAM_REGION
 *
 * Dispatches to the PMSAv7 or PMSAv8 setup routine based on ID_MMFR0;
 * returns directly (doing nothing) if neither is implemented.
 * r12 is left holding the SCB base for the M-class callees.
 */
	__HEAD

ENTRY(__setup_mpu)
	/* Probe for v7 PMSA compliance */
M_CLASS(movw	r12, #:lower16:BASEADDR_V7M_SCB)
M_CLASS(movt	r12, #:upper16:BASEADDR_V7M_SCB)

AR_CLASS(mrc	p15, 0, r0, c0, c1, 4)	@ Read ID_MMFR0
M_CLASS(ldr	r0, [r12, 0x50])	@ 0x50: SCB mirror of ID_MMFR0
	and	r0, r0, #(MMFR0_PMSA)	@ PMSA field
	teq	r0, #(MMFR0_PMSAv7)	@ PMSA v7
	beq	__setup_pmsa_v7
	teq	r0, #(MMFR0_PMSAv8)	@ PMSA v8
	beq	__setup_pmsa_v8

	ret	lr			@ no supported PMSA: nothing to do
ENDPROC(__setup_mpu)
  236. ENTRY(__setup_pmsa_v7)
  237. /* Calculate the size of a region covering just the kernel */
  238. ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
  239. ldr r6, =(_end) @ Cover whole kernel
  240. sub r6, r6, r5 @ Minimum size of region to map
  241. clz r6, r6 @ Region size must be 2^N...
  242. rsb r6, r6, #31 @ ...so round up region size
  243. lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
  244. orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
  245. /* Determine whether the D/I-side memory map is unified. We set the
  246. * flags here and continue to use them for the rest of this function */
  247. AR_CLASS(mrc p15, 0, r0, c0, c0, 4) @ MPUIR
  248. M_CLASS(ldr r0, [r12, #MPU_TYPE])
  249. ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
  250. bxeq lr
  251. tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
  252. /* Setup second region first to free up r6 */
  253. set_region_nr r0, #PMSAv7_RAM_REGION, r12
  254. isb
  255. /* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
  256. ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
  257. ldr r5,=(PMSAv7_AP_PL1RW_PL0RW | PMSAv7_RGN_NORMAL)
  258. setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
  259. beq 1f @ Memory-map not unified
  260. setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
  261. 1: isb
  262. /* First/background region */
  263. set_region_nr r0, #PMSAv7_BG_REGION, r12
  264. isb
  265. /* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
  266. mov r0, #0 @ BG region starts at 0x0
  267. ldr r5,=(PMSAv7_ACR_XN | PMSAv7_RGN_STRONGLY_ORDERED | PMSAv7_AP_PL1RW_PL0NA)
  268. mov r6, #PMSAv7_RSR_ALL_MEM @ 4GB region, enabled
  269. setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ 0x0, BG region, enabled
  270. beq 2f @ Memory-map not unified
  271. setup_region r0, r5, r6, PMSAv7_INSTR_SIDE r12 @ 0x0, BG region, enabled
  272. 2: isb
  273. #ifdef CONFIG_XIP_KERNEL
  274. set_region_nr r0, #PMSAv7_ROM_REGION, r12
  275. isb
  276. ldr r5,=(PMSAv7_AP_PL1RO_PL0NA | PMSAv7_RGN_NORMAL)
  277. ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start
  278. ldr r6, =(_exiprom) @ ROM end
  279. sub r6, r6, r0 @ Minimum size of region to map
  280. clz r6, r6 @ Region size must be 2^N...
  281. rsb r6, r6, #31 @ ...so round up region size
  282. lsl r6, r6, #PMSAv7_RSR_SZ @ Put size in right field
  283. orr r6, r6, #(1 << PMSAv7_RSR_EN) @ Set region enabled bit
  284. setup_region r0, r5, r6, PMSAv7_DATA_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
  285. beq 3f @ Memory-map not unified
  286. setup_region r0, r5, r6, PMSAv7_INSTR_SIDE, r12 @ XIP_PHYS_ADDR, shared, enabled
  287. 3: isb
  288. #endif
  289. ret lr
  290. ENDPROC(__setup_pmsa_v7)
ENTRY(__setup_pmsa_v8)
	/*
	 * Program the initial PMSAv8 regions for the boot CPU.
	 *
	 * Region map: 0 = XIP ROM (XIP only), 1 = kernel image,
	 * 2 = device/XN background below the kernel, 3 = device/XN
	 * background above it, 4 = device/XN gap between ROM and RAM
	 * (XIP only).  r12 = SCB base (M-class only).
	 * Clobbers: r0, r5, r6, flags.
	 */
	mov	r0, #0
	AR_CLASS(mcr	p15, 0, r0, c6, c2, 1)	@ PRSEL
	M_CLASS(str	r0, [r12, #PMSAv8_RNR])
	isb

#ifdef CONFIG_XIP_KERNEL
	/* Region 0: the XIP ROM, normal memory, PL0-inaccessible */
	ldr	r5, =CONFIG_XIP_PHYS_ADDR	@ ROM start
	ldr	r6, =(_exiprom)			@ ROM end
	sub	r6, r6, #1			@ PRLAR holds the last byte...
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)	@ ...rounded to region alignment

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)

	AR_CLASS(mcr	p15, 0, r5, c6, c8, 0)	@ PRBAR0
	AR_CLASS(mcr	p15, 0, r6, c6, c8, 1)	@ PRLAR0
	M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(0)])
	M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(0)])
#endif

	/* Region 1: the kernel image, normal memory, PL0-inaccessible */
	ldr	r5, =KERNEL_START
	ldr	r6, =KERNEL_END
	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN)

	AR_CLASS(mcr	p15, 0, r5, c6, c8, 4)	@ PRBAR1
	AR_CLASS(mcr	p15, 0, r6, c6, c8, 5)	@ PRLAR1
	M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(1)])
	M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(1)])

	/* Setup Background: 0x0 - min(KERNEL_START, XIP_PHYS_ADDR) */
#ifdef CONFIG_XIP_KERNEL
	ldr	r6, =KERNEL_START
	ldr	r5, =CONFIG_XIP_PHYS_ADDR
	cmp	r6, r5
	movcs	r6, r5				@ r6 = min(KERNEL_START, XIP addr)
#else
	ldr	r6, =KERNEL_START
#endif
	cmp	r6, #0
	beq	1f				@ image starts at 0: no gap below

	mov	r5, #0				@ background starts at 0x0
	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

	AR_CLASS(mcr	p15, 0, r5, c6, c9, 0)	@ PRBAR2
	AR_CLASS(mcr	p15, 0, r6, c6, c9, 1)	@ PRLAR2
	M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(2)])
	M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(2)])
1:
	/* Setup Background: max(KERNEL_END, _exiprom) - 0xffffffff */
#ifdef CONFIG_XIP_KERNEL
	ldr	r5, =KERNEL_END
	ldr	r6, =(_exiprom)
	cmp	r5, r6
	movcc	r5, r6				@ r5 = max(KERNEL_END, _exiprom)
#else
	ldr	r5, =KERNEL_END
#endif
	mov	r6, #0xffffffff			@ up to the top of the 4GB space
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

	AR_CLASS(mcr	p15, 0, r5, c6, c9, 4)	@ PRBAR3
	AR_CLASS(mcr	p15, 0, r6, c6, c9, 5)	@ PRLAR3
	M_CLASS(str	r5, [r12, #PMSAv8_RBAR_A(3)])
	M_CLASS(str	r6, [r12, #PMSAv8_RLAR_A(3)])

#ifdef CONFIG_XIP_KERNEL
	/* Setup Background: min(_exiprom, KERNEL_END) - max(KERNEL_START, XIP_PHYS_ADDR) */
	ldr	r5, =(_exiprom)
	ldr	r6, =KERNEL_END
	cmp	r5, r6
	movcs	r5, r6				@ r5 = min(_exiprom, KERNEL_END)

	ldr	r6, =KERNEL_START
	ldr	r0, =CONFIG_XIP_PHYS_ADDR
	cmp	r6, r0
	movcc	r6, r0				@ r6 = max(KERNEL_START, XIP addr)

	sub	r6, r6, #1
	bic	r6, r6, #(PMSAv8_MINALIGN - 1)

	orr	r5, r5, #(PMSAv8_AP_PL1RW_PL0NA | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN)
	orr	r6, r6, #(PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN)

#ifdef CONFIG_CPU_V7M
	/* There is no alias for n == 4 */
	mov	r0, #4
	str	r0, [r12, #PMSAv8_RNR]		@ PRSEL
	isb

	str	r5, [r12, #PMSAv8_RBAR_A(0)]
	str	r6, [r12, #PMSAv8_RLAR_A(0)]
#else
	mcr	p15, 0, r5, c6, c10, 1		@ PRBAR4
	mcr	p15, 0, r6, c6, c10, 2		@ PRLAR4
#endif
#endif
	ret	lr
ENDPROC(__setup_pmsa_v8)
#ifdef CONFIG_SMP
/*
 * Secondary-CPU MPU setup: dispatch on the PMSA version advertised by
 * ID_MMFR0, mirroring __setup_mpu but using the region layout the boot
 * CPU recorded in secondary_data.
 *
 * In:  r7 = &secondary_data (set in secondary_startup)
 * Out: r6 = pointer at mpu_rgn_info (consumed by the callees)
 */
	.text
ENTRY(__secondary_setup_mpu)
	/* Use MPU region info supplied by __cpu_up */
	ldr	r6, [r7]			@ get secondary_data.mpu_rgn_info

	/* Probe for v7 PMSA compliance */
	mrc	p15, 0, r0, c0, c1, 4		@ Read ID_MMFR0
	and	r0, r0, #(MMFR0_PMSA)		@ PMSA field
	teq	r0, #(MMFR0_PMSAv7)		@ PMSA v7
	beq	__secondary_setup_pmsa_v7
	teq	r0, #(MMFR0_PMSAv8)		@ PMSA v8
	beq	__secondary_setup_pmsa_v8
	b	__error_p			@ unlike the boot CPU, a missing
						@ PMSA here is a fatal mismatch
ENDPROC(__secondary_setup_mpu)
/*
 * r6: pointer at mpu_rgn_info
 *
 * Replays the boot CPU's PMSAv7 regions on this secondary, walking the
 * mpu_rgn_info.rgns array backwards from the last used entry down to 0.
 * Clobbers r0, r3, r4, r5, r6, flags.
 */
ENTRY(__secondary_setup_pmsa_v7)
	/* Determine whether the D/I-side memory map is unified. We set the
	 * flags here and continue to use them for the rest of this function */
	mrc	p15, 0, r0, c0, c0, 4		@ MPUIR
	ands	r5, r0, #MPUIR_DREGION_SZMASK	@ 0 size d region => No MPU
	beq	__error_p

	ldr	r4, [r6, #MPU_RNG_INFO_USED]	@ r4 = number of regions in use
	mov	r5, #MPU_RNG_SIZE
	add	r3, r6, #MPU_RNG_INFO_RNGS
	mla	r3, r4, r5, r3			@ r3 = one past the last used entry
1:
	tst	r0, #MPUIR_nU			@ MPUIR_nU = 0 for unified
	sub	r3, r3, #MPU_RNG_SIZE		@ step back to the previous entry
	sub	r4, r4, #1			@ ...and its region number
	set_region_nr r0, r4
	isb

	ldr	r0, [r3, #MPU_RGN_DRBAR]
	ldr	r6, [r3, #MPU_RGN_DRSR]
	ldr	r5, [r3, #MPU_RGN_DRACR]

	setup_region r0, r5, r6, PMSAv7_DATA_SIDE
	beq	2f				@ not unified: skip I-side copy
	setup_region r0, r5, r6, PMSAv7_INSTR_SIDE
2:	isb

	mrc	p15, 0, r0, c0, c0, 4		@ Reevaluate the MPUIR
						@ (r0 was clobbered by the DRBAR
						@ load; tst above needs it fresh)
	cmp	r4, #0
	bgt	1b
	ret	lr
ENDPROC(__secondary_setup_pmsa_v7)
/*
 * Replays the boot CPU's PMSAv8 regions on this secondary, walking the
 * mpu_rgn_info.rgns array backwards via PRSEL/PRBAR/PRLAR.
 *
 * In:  r6 = pointer at mpu_rgn_info
 * Clobbers r3, r4, r5, r6, flags.
 */
ENTRY(__secondary_setup_pmsa_v8)
	ldr	r4, [r6, #MPU_RNG_INFO_USED]
#ifndef CONFIG_XIP_KERNEL
	add	r4, r4, #1			@ NOTE(review): presumably accounts
						@ for the extra region the non-XIP
						@ boot path programs -- confirm
						@ against the mpu_rgn_info producer
#endif
	mov	r5, #MPU_RNG_SIZE
	add	r3, r6, #MPU_RNG_INFO_RNGS
	mla	r3, r4, r5, r3			@ r3 = one past the last used entry
1:
	sub	r3, r3, #MPU_RNG_SIZE		@ step back to the previous entry
	sub	r4, r4, #1			@ ...and its region number
	mcr	p15, 0, r4, c6, c2, 1		@ PRSEL
	isb

	ldr	r5, [r3, #MPU_RGN_PRBAR]
	ldr	r6, [r3, #MPU_RGN_PRLAR]

	mcr	p15, 0, r5, c6, c3, 0		@ PRBAR
	mcr	p15, 0, r6, c6, c3, 1		@ PRLAR

	cmp	r4, #0
	bgt	1b
	ret	lr
ENDPROC(__secondary_setup_pmsa_v8)
#endif /* CONFIG_SMP */
#endif /* CONFIG_ARM_MPU */
  455. #include "head-common.S"