/* trampoline.S */
  1. /*
  2. * kexec trampoline
  3. *
  4. * Based on code taken from kexec-tools and kexec-lite.
  5. *
  6. * Copyright (C) 2004 - 2005, Milton D Miller II, IBM Corporation
  7. * Copyright (C) 2006, Mohan Kumar M, IBM Corporation
  8. * Copyright (C) 2013, Anton Blanchard, IBM Corporation
  9. *
  10. * This program is free software; you can redistribute it and/or modify it under
  11. * the terms of the GNU General Public License as published by the Free
  12. * Software Foundation (version 2 of the License).
  13. */
  14. #if defined(__LITTLE_ENDIAN__)
  15. #define STWX_BE stwbrx
  16. #define LWZX_BE lwbrx
  17. #elif defined(__BIG_ENDIAN__)
  18. #define STWX_BE stwx
  19. #define LWZX_BE lwzx
  20. #else
  21. #error no endianness defined!
  22. #endif
.machine ppc64
/* 256-byte alignment so the fixed .org offsets below are meaningful */
.balign 256
.globl purgatory_start
purgatory_start:
/* offset 0x00: the master cpu enters here */
b master
/* ABI: possible run_at_load flag at 0x5c */
.org purgatory_start + 0x5c
.globl run_at_load
run_at_load:
/* 32-bit flag word; read by master below and patched into the kernel at 0x5c */
.long 0
.size run_at_load, . - run_at_load
/* ABI: slaves start at 60 with r3=phys */
.org purgatory_start + 0x60
slave:
/* secondary cpus spin here; real slave code is copied over this region later */
b .
/* ABI: end of copied region */
.org purgatory_start + 0x100
.size purgatory_start, . - purgatory_start
/*
 * The above 0x100 bytes at purgatory_start are replaced with the
 * code from the kernel (or next stage) by setup_purgatory().
 */
  45. master:
  46. or %r1,%r1,%r1 /* low priority to let other threads catchup */
  47. isync
  48. mr %r17,%r3 /* save cpu id to r17 */
  49. mr %r15,%r4 /* save physical address in reg15 */
  50. or %r3,%r3,%r3 /* ok now to high priority, lets boot */
  51. lis %r6,0x1
  52. mtctr %r6 /* delay a bit for slaves to catch up */
  53. bdnz . /* before we overwrite 0-100 again */
  54. bl 0f /* Work out where we're running */
  55. 0: mflr %r18
  56. /* load device-tree address */
  57. ld %r3, (dt_offset - 0b)(%r18)
  58. mr %r16,%r3 /* save dt address in reg16 */
  59. li %r4,20
  60. LWZX_BE %r6,%r3,%r4 /* fetch __be32 version number at byte 20 */
  61. cmpwi %r0,%r6,2 /* v2 or later? */
  62. blt 1f
  63. li %r4,28
  64. STWX_BE %r17,%r3,%r4 /* Store my cpu as __be32 at byte 28 */
  65. 1:
  66. /* load the kernel address */
  67. ld %r4,(kernel - 0b)(%r18)
  68. /* load the run_at_load flag */
  69. /* possibly patched by kexec */
  70. ld %r6,(run_at_load - 0b)(%r18)
  71. /* and patch it into the kernel */
  72. stw %r6,(0x5c)(%r4)
  73. mr %r3,%r16 /* restore dt address */
  74. li %r5,0 /* r5 will be 0 for kernel */
  75. mfmsr %r11
  76. andi. %r10,%r11,1 /* test MSR_LE */
  77. bne .Little_endian
  78. mtctr %r4 /* prepare branch to */
  79. bctr /* start kernel */
  80. .Little_endian:
  81. mtsrr0 %r4 /* prepare branch to */
  82. clrrdi %r11,%r11,1 /* clear MSR_LE */
  83. mtsrr1 %r11
  84. rfid /* update MSR and start kernel */
.balign 8
.globl kernel
kernel:
/* 64-bit slot: entry address of the next kernel; presumably filled in by the
 * kexec loader before purgatory runs — verify against setup_purgatory() */
.llong 0x0
.size kernel, . - kernel
.balign 8
.globl dt_offset
dt_offset:
/* 64-bit slot: address of the device tree handed to the next kernel in r3;
 * presumably filled in by the kexec loader — verify against setup_purgatory() */
.llong 0x0
.size dt_offset, . - dt_offset
.data
.balign 8
.globl sha256_digest
sha256_digest:
/* 32 bytes: expected SHA-256 digest; NOTE(review): not referenced by the
 * visible code — presumably checked by a C routine elsewhere, confirm */
.skip 32
.size sha256_digest, . - sha256_digest
.balign 8
.globl sha_regions
sha_regions:
/* room for 16 pairs of 8-byte values (address, length per pair assumed —
 * TODO confirm against the loader that populates this table) */
.skip 8 * 2 * 16
.size sha_regions, . - sha_regions