/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/virt.h>

/*
 * swapper_pg_dir is the virtual address of the initial page table. We place
 * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
 * 2 pages and is placed below swapper_pg_dir.
 */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)

#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
#error KERNEL_RAM_VADDR must start at 0xXXX80000
#endif

#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE

	.globl	idmap_pg_dir
	.equ	idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE

	.macro	pgtbl, ttb0, ttb1, phys
	add	\ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
	sub	\ttb0, \ttb1, #IDMAP_DIR_SIZE
	.endm
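
/*
 * In rough C terms (an illustrative sketch, not part of the build), the
 * pgtbl macro computes:
 *
 *	ttb1 = phys + TEXT_OFFSET - SWAPPER_DIR_SIZE;	// swapper_pg_dir
 *	ttb0 = ttb1 - IDMAP_DIR_SIZE;			// idmap_pg_dir
 *
 * i.e. both page table directories sit immediately below the kernel image
 * in physical memory, mirroring the virtual layout described above.
 */
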
#ifdef CONFIG_ARM64_64K_PAGES
#define BLOCK_SHIFT	PAGE_SHIFT
#define BLOCK_SIZE	PAGE_SIZE
#else
#define BLOCK_SHIFT	SECTION_SHIFT
#define BLOCK_SIZE	SECTION_SIZE
#endif

#define KERNEL_START	KERNEL_RAM_VADDR
#define KERNEL_END	_end

/*
 * Initial memory map attributes.
 */
#ifndef CONFIG_SMP
#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF
#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF
#else
#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
#endif

#ifdef CONFIG_ARM64_64K_PAGES
#define MM_MMUFLAGS	PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
#else
#define MM_MMUFLAGS	PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD

	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
	.quad	TEXT_OFFSET			// Image load offset from start of RAM
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.byte	0x41				// Magic number, "ARM\x64"
	.byte	0x52
	.byte	0x4d
	.byte	0x64
	.word	0				// reserved
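
/*
 * For orientation, the 64-byte header above can be viewed as the following
 * C layout (an illustrative sketch; the field names are ours, while the
 * offsets, TEXT_OFFSET semantics and the "ARM\x64" magic follow
 * Documentation/arm64/booting.txt):
 *
 *	struct arm64_image_header {
 *		u32 code0;		// b stext
 *		u32 code1;		// reserved (.long 0)
 *		u64 text_offset;	// image load offset from start of RAM
 *		u64 res[5];		// reserved
 *		u8  magic[4];		// 0x41 0x52 0x4d 0x64, "ARM\x64"
 *		u32 res5;		// reserved
 *	};
 */
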
ENTRY(stext)
	mov	x21, x0				// x21=FDT
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
	bl	set_cpu_boot_mode_flag
	mrs	x22, midr_el1			// x22=cpuid
	mov	x0, x22
	bl	lookup_processor_type
	mov	x23, x0				// x23=current cpu_table
	cbz	x23, __error_p			// invalid processor (x23=0)?
	bl	__vet_fdt
	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
	/*
	 * The following calls CPU specific code in a position independent
	 * manner. See arch/arm64/mm/proc.S for details. x23 = base of
	 * cpu_info structure selected by lookup_processor_type above.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	ldr	x27, __switch_data		// address to jump to after
						// MMU has been enabled
	adr	lr, __enable_mmu		// return (PIC) address
	ldr	x12, [x23, #CPU_INFO_SETUP]
	add	x12, x12, x28			// __virt_to_phys
	br	x12				// initialise processor
ENDPROC(stext)

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	mrs	x0, CurrentEL
	cmp	x0, #PSR_MODE_EL2t
	ccmp	x0, #PSR_MODE_EL2h, #0x4, ne
	b.ne	1f
	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25)	)	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25)	)	// Clear the EE bit for EL2
	msr	sctlr_el2, x0
	b	2f
1:	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24)	)	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w20, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

	/* Hyp configuration. */
2:	mov	x0, #(1 << 31)			// 64-bit EL1
	msr	hcr_el2, x0

	/* Generic timers. */
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	/* Hypervisor stub */
	adr	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)
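
/*
 * In outline, el2_setup behaves like this pseudo-C (an illustrative sketch
 * only; the helper names below are ours, not kernel APIs):
 *
 *	if (current_el() != EL2) {
 *		fix_endianness_bits(SCTLR_EL1);	// EE/E0E per CPU_BE/CPU_LE
 *		return BOOT_CPU_MODE_EL1;	// in w20
 *	}
 *	fix_endianness_bits(SCTLR_EL2);
 *	write_hcr_el2(1UL << 31);		// HCR_EL2.RW: EL1 is AArch64
 *	enable_el1_physical_timers();
 *	copy_midr_mpidr_to_vpidr_vmpidr();
 *	preset_sctlr_el1_res_bits();
 *	disable_traps_to_el2();
 *	install_hyp_stub_vectors();
 *	eret();		// to EL1h at the caller's lr, w20 = BOOT_CPU_MODE_EL2
 */
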
/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in x20. See arch/arm64/include/asm/virt.h for more info.
 */
ENTRY(set_cpu_boot_mode_flag)
	ldr	x1, =__boot_cpu_mode		// Compute __boot_cpu_mode
	add	x1, x1, x28
	cmp	w20, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	dc	cvac, x1			// Clean potentially dirty cache line
	dsb	sy
	str	w20, [x1]			// Save CPU boot mode
	dc	civac, x1			// Clean&invalidate potentially stale cache line
	dsb	sy
	ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
	.pushsection	.data..cacheline_aligned
ENTRY(__boot_cpu_mode)
	.align	L1_CACHE_SHIFT
	.long	BOOT_CPU_MODE_EL2
	.long	0
	.popsection
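
/*
 * Seen from C, the variable above is roughly (sketch only):
 *
 *	u32 __boot_cpu_mode[2] = { BOOT_CPU_MODE_EL2, 0 };
 *
 * set_cpu_boot_mode_flag stores w20 into element 0 for a CPU that booted
 * in EL1 and into element 1 for a CPU that booted in EL2; the cache
 * maintenance around the store is needed because the flag is written
 * while the MMU and D-cache are still off.
 */
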
	.align	3
2:	.quad	.
	.quad	PAGE_OFFSET

#ifdef CONFIG_SMP
	.align	3
1:	.quad	.
	.quad	secondary_holding_pen_release

	/*
	 * This provides a "holding pen" where platforms can hold all secondary
	 * cores until we're ready for them to initialise.
	 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	ldr	x1, =MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr	x1, 1b
	ldp	x2, x3, [x1]
	sub	x1, x1, x2
	add	x3, x3, x1
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)
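
/*
 * The pen loop above is, in effect (illustrative C sketch only):
 *
 *	cpu = read_mpidr() & MPIDR_HWID_BITMASK;
 *	while (secondary_holding_pen_release != cpu)
 *		wfe();			// woken by an event from the releasing CPU
 *	secondary_startup();
 *
 * The adr/ldp/sub/add sequence beforehand converts the link-time address of
 * secondary_holding_pen_release into a physical address, since the MMU is
 * still off at this point.
 */
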
/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mrs	x22, midr_el1			// x22=cpuid
	mov	x0, x22
	bl	lookup_processor_type
	mov	x23, x0				// x23=current cpu_table
	cbz	x23, __error_p			// invalid processor (x23=0)?

	pgtbl	x25, x26, x24			// x25=TTBR0, x26=TTBR1
	ldr	x12, [x23, #CPU_INFO_SETUP]
	add	x12, x12, x28			// __virt_to_phys
	blr	x12				// initialise processor

	ldr	x21, =secondary_data
	ldr	x27, =__secondary_switched	// address to jump to after enabling the MMU
	b	__enable_mmu
ENDPROC(secondary_startup)

ENTRY(__secondary_switched)
	ldr	x0, [x21]			// get secondary_data.stack
	mov	sp, x0
	mov	x29, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
#endif	/* CONFIG_SMP */

/*
 * Setup common bits before finally enabling the MMU. Essentially this is just
 * loading the page table pointer and vector base registers.
 *
 * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
 * the MMU.
 */
__enable_mmu:
	ldr	x5, =vectors
	msr	vbar_el1, x5
	msr	ttbr0_el1, x25			// load TTBR0
	msr	ttbr1_el1, x26			// load TTBR1
	isb
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU. This completely changes the structure of the visible memory
 * space. You will not be able to trace execution through this.
 *
 *  x0  = system control register
 *  x27 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	6
__turn_mmu_on:
	msr	sctlr_el1, x0
	isb
	br	x27
ENDPROC(__turn_mmu_on)

/*
 * Calculate the start of physical memory.
 */
__calc_phys_offset:
	adr	x0, 1f
	ldp	x1, x2, [x0]
	sub	x28, x0, x1			// x28 = PHYS_OFFSET - PAGE_OFFSET
	add	x24, x2, x28			// x24 = PHYS_OFFSET
	ret
ENDPROC(__calc_phys_offset)

	.align	3
1:	.quad	.
	.quad	PAGE_OFFSET
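
/*
 * The literal pool above stores the link-time address of label 1 (".")
 * next to PAGE_OFFSET, so the routine amounts to (illustrative sketch):
 *
 *	x28 = runtime_addr_of_1 - linktime_addr_of_1;	// PHYS_OFFSET - PAGE_OFFSET
 *	x24 = PAGE_OFFSET + x28;			// PHYS_OFFSET
 *
 * where runtime_addr_of_1 is the physical address adr returns while the
 * MMU is off.
 */
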
/*
 * Macro to populate the PGD for the corresponding block entry in the next
 * level (tbl) for the given virtual address.
 *
 * Preserves:	pgd, tbl, virt
 * Corrupts:	tmp1, tmp2
 */
	.macro	create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
	lsr	\tmp1, \virt, #PGDIR_SHIFT
	and	\tmp1, \tmp1, #PTRS_PER_PGD - 1	// PGD index
	orr	\tmp2, \tbl, #3			// PGD entry table type
	str	\tmp2, [\pgd, \tmp1, lsl #3]
	.endm
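
/*
 * Equivalent C for the macro above (illustrative sketch only):
 *
 *	pgd[(virt >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)] = (u64)tbl | 3;
 *
 * where the low two bits (0b11) mark a valid table descriptor pointing at
 * the next-level table.
 */
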
/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end
	lsr	\phys, \phys, #BLOCK_SHIFT
	lsr	\start, \start, #BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	orr	\phys, \flags, \phys, lsl #BLOCK_SHIFT	// table entry
	lsr	\end, \end, #BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #BLOCK_SIZE		// next block
	cmp	\start, \end
	b.ls	9999b
	.endm
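
/*
 * Equivalent C for the macro above (illustrative sketch only):
 *
 *	idx   = (start >> BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
 *	last  = (end   >> BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
 *	entry = flags | ((phys >> BLOCK_SHIFT) << BLOCK_SHIFT);
 *	do {
 *		tbl[idx++] = entry;
 *		entry += BLOCK_SIZE;
 *	} while (idx <= last);		// b.ls: inclusive range
 */
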
/*
 * Setup the initial page tables. We only setup the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled, including the FDT blob (TTBR1)
 *   - pgd entry for fixed mappings (TTBR1)
 */
__create_page_tables:
	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
	mov	x27, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	/*
	 * Clear the idmap and swapper page tables.
	 */
	mov	x0, x25
	add	x6, x26, #SWAPPER_DIR_SIZE
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	cmp	x0, x6
	b.lo	1b

	ldr	x7, =MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	add	x0, x25, #PAGE_SIZE		// section table address
	ldr	x3, =KERNEL_START
	add	x3, x3, x28			// __pa(KERNEL_START)
	create_pgd_entry x25, x0, x3, x5, x6
	ldr	x6, =KERNEL_END
	mov	x5, x3				// __pa(KERNEL_START)
	add	x6, x6, x28			// __pa(KERNEL_END)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	add	x0, x26, #PAGE_SIZE		// section table address
	mov	x5, #PAGE_OFFSET
	create_pgd_entry x26, x0, x5, x3, x6
	ldr	x6, =KERNEL_END
	mov	x3, x24				// phys offset
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the FDT blob (maximum 2MB; must be within 512MB of
	 * PHYS_OFFSET).
	 */
	mov	x3, x21				// FDT phys address
	and	x3, x3, #~((1 << 21) - 1)	// 2MB aligned
	mov	x6, #PAGE_OFFSET
	sub	x5, x3, x24			// subtract PHYS_OFFSET
	tst	x5, #~((1 << 29) - 1)		// within 512MB?
	csel	x21, xzr, x21, ne		// zero the FDT pointer
	b.ne	1f
	add	x5, x5, x6			// __va(FDT blob)
	add	x6, x5, #1 << 21		// 2MB for the FDT blob
	sub	x6, x6, #1			// inclusive range
	create_block_map x0, x7, x3, x5, x6
1:
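
	/*
	 * The FDT window check above, in C terms (illustrative sketch only):
	 *
	 *	fdt_2m = fdt_phys & ~((1UL << 21) - 1);		// 2MB aligned
	 *	if ((fdt_2m - PHYS_OFFSET) & ~((1UL << 29) - 1))
	 *		fdt = NULL;	// outside the 512MB window, drop it
	 *	else
	 *		map_2mb_block(PAGE_OFFSET + (fdt_2m - PHYS_OFFSET));
	 */
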
	/*
	 * Create the pgd entry for the fixed mappings.
	 */
	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
	add	x0, x26, #2 * PAGE_SIZE		// section table address
	create_pgd_entry x26, x0, x5, x6, x7

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	mov	lr, x27
	ret
ENDPROC(__create_page_tables)
	.ltorg

	.align	3
	.type	__switch_data, %object
__switch_data:
	.quad	__mmap_switched
	.quad	__bss_start			// x6
	.quad	_end				// x7
	.quad	processor_id			// x4
	.quad	__fdt_pointer			// x5
	.quad	memstart_addr			// x6
	.quad	init_thread_union + THREAD_START_SP // sp

/*
 * The following fragment of code is executed with the MMU enabled and uses
 * absolute addresses; it is not position independent.
 */
__mmap_switched:
	adr	x3, __switch_data + 8

	ldp	x6, x7, [x3], #16
1:	cmp	x6, x7
	b.hs	2f
	str	xzr, [x6], #8			// Clear BSS
	b	1b
2:
	ldp	x4, x5, [x3], #16
	ldr	x6, [x3], #8
	ldr	x16, [x3]
	mov	sp, x16
	str	x22, [x4]			// Save processor ID
	str	x21, [x5]			// Save FDT pointer
	str	x24, [x6]			// Save PHYS_OFFSET
	mov	x29, #0
	b	start_kernel
ENDPROC(__mmap_switched)
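
/*
 * __mmap_switched in C terms (illustrative sketch only):
 *
 *	memset(__bss_start, 0, _end - __bss_start);	// clear BSS
 *	processor_id  = x22;				// cpuid from MIDR_EL1
 *	__fdt_pointer = x21;				// FDT phys address (or 0)
 *	memstart_addr = x24;				// PHYS_OFFSET
 *	// switch sp to init_thread_union + THREAD_START_SP, then:
 *	start_kernel();
 */
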
/*
 * Exception handling. Something went wrong and we can't proceed. We ought to
 * tell the user, but since we don't have any guarantee that we're even
 * running on the right architecture, we do virtually nothing.
 */
__error_p:
ENDPROC(__error_p)

__error:
1:	nop
	b	1b
ENDPROC(__error)

/*
 * This function gets the processor ID in w0 and searches the cpu_table[] for
 * a match. It returns a pointer to the struct cpu_info it found. The
 * cpu_table[] must end with an empty (all zeros) structure.
 *
 * This routine can be called via C code and it needs to work with the MMU
 * both disabled and enabled (the offset is calculated automatically).
 */
ENTRY(lookup_processor_type)
	adr	x1, __lookup_processor_type_data
	ldp	x2, x3, [x1]
	sub	x1, x1, x2			// get offset between VA and PA
	add	x3, x3, x1			// convert VA to PA
1:
	ldp	w5, w6, [x3]			// load cpu_id_val and cpu_id_mask
	cbz	w5, 2f				// end of list?
	and	w6, w6, w0
	cmp	w5, w6
	b.eq	3f
	add	x3, x3, #CPU_INFO_SZ
	b	1b
2:
	mov	x3, #0				// unknown processor
3:
	mov	x0, x3
	ret
ENDPROC(lookup_processor_type)

	.align	3
	.type	__lookup_processor_type_data, %object
__lookup_processor_type_data:
	.quad	.
	.quad	cpu_table
	.size	__lookup_processor_type_data, . - __lookup_processor_type_data
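
/*
 * The search above, as C (illustrative sketch only):
 *
 *	struct cpu_info *p;
 *
 *	for (p = cpu_table; p->cpu_id_val; p++)
 *		if ((midr & p->cpu_id_mask) == p->cpu_id_val)
 *			return p;
 *	return NULL;			// unknown processor
 */
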
/*
 * Determine validity of the x21 FDT pointer.
 * The dtb must be 8-byte aligned and live in the first 512M of memory.
 */
__vet_fdt:
	tst	x21, #0x7
	b.ne	1f
	cmp	x21, x24
	b.lt	1f
	mov	x0, #(1 << 29)
	add	x0, x0, x24
	cmp	x21, x0
	b.ge	1f
	ret
1:
	mov	x21, #0
	ret
ENDPROC(__vet_fdt)
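
/*
 * __vet_fdt in C terms (illustrative sketch only):
 *
 *	if ((fdt & 7) ||			// not 8-byte aligned
 *	    fdt < PHYS_OFFSET ||		// below the start of memory
 *	    fdt >= PHYS_OFFSET + (1UL << 29))	// beyond the first 512MB
 *		fdt = 0;
 */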