/*
 * arch/arm/include/asm/processor.h
 *
 * Copyright (C) 1995-1999 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_PROCESSOR_H
#define __ASM_ARM_PROCESSOR_H

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 *
 * Uses GCC's local-label and address-of-label (&&) extensions: the
 * statement expression declares a label at this point and evaluates
 * to its address, i.e. the address of the code currently executing.
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l;})
#ifdef __KERNEL__

#include <asm/hw_breakpoint.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/unified.h>

/* NOTE(review): this inner __KERNEL__ guard is redundant -- we are
 * already inside the __KERNEL__ block opened above.  Harmless. */
#ifdef __KERNEL__
/*
 * Top of the user stack: tasks restricted by personality to 26-bit
 * addressing get the smaller TASK_SIZE_26 limit, everyone else the
 * full TASK_SIZE.
 */
#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
	TASK_SIZE : TASK_SIZE_26)
#define STACK_TOP_MAX TASK_SIZE
#endif
/*
 * Per-thread debug state.  Empty unless the kernel is built with
 * hardware breakpoint support.
 */
struct debug_info {
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* One perf_event per hardware breakpoint slot. */
	struct perf_event *hbp[ARM_MAX_HBP_SLOTS];
#endif
};
/*
 * Architecture-specific per-thread state: fault bookkeeping plus
 * debug (hardware breakpoint) info.
 */
struct thread_struct {
	/* fault info -- presumably recorded when this thread last faulted;
	 * written elsewhere (fault handlers), not visible in this header. */
	unsigned long address;		/* faulting address */
	unsigned long trap_no;		/* trap number */
	unsigned long error_code;	/* fault/error status */
	/* debugging */
	struct debug_info debug;	/* hw breakpoint state, see above */
};
  40. /*
  41. * Everything usercopied to/from thread_struct is statically-sized, so
  42. * no hardened usercopy whitelist is needed.
  43. */
  44. static inline void arch_thread_struct_whitelist(unsigned long *offset,
  45. unsigned long *size)
  46. {
  47. *offset = *size = 0;
  48. }
/* A fresh thread_struct needs no non-zero initial state. */
#define INIT_THREAD { }

/*
 * start_thread() - initialise user register state for exec of a new
 * program image.
 *
 * Wipes the whole user register file, then sets up CPSR (26/32-bit
 * user mode, Thumb bit, endian state), the entry PC and the initial SP.
 * For FDPIC binaries, r7-r9 are preserved across the wipe and r10 is
 * pointed at start_data (NOTE(review): presumably the FDPIC loadmap /
 * data-segment convention -- confirm against the ELF FDPIC binfmt).
 */
#define start_thread(regs,pc,sp) \
({ \
	unsigned long r7, r8, r9; \
\
	/* Save r7-r9 before memset() below clobbers them. */ \
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC)) { \
		r7 = regs->ARM_r7; \
		r8 = regs->ARM_r8; \
		r9 = regs->ARM_r9; \
	} \
	memset(regs->uregs, 0, sizeof(regs->uregs)); \
	/* Restore for FDPIC function-pointer personalities; plain \
	 * no-MMU binaries still get start_data in r10. */ \
	if (IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) && \
	    current->personality & FDPIC_FUNCPTRS) { \
		regs->ARM_r7 = r7; \
		regs->ARM_r8 = r8; \
		regs->ARM_r9 = r9; \
		regs->ARM_r10 = current->mm->start_data; \
	} else if (!IS_ENABLED(CONFIG_MMU)) \
		regs->ARM_r10 = current->mm->start_data; \
	/* 26-bit vs 32-bit user mode is a personality choice. */ \
	if (current->personality & ADDR_LIMIT_32BIT) \
		regs->ARM_cpsr = USR_MODE; \
	else \
		regs->ARM_cpsr = USR26_MODE; \
	/* An odd entry address selects Thumb state (if supported). */ \
	if (elf_hwcap & HWCAP_THUMB && pc & 1) \
		regs->ARM_cpsr |= PSR_T_BIT; \
	regs->ARM_cpsr |= PSR_ENDSTATE; \
	regs->ARM_pc = pc & ~1; /* pc (Thumb bit stripped) */ \
	regs->ARM_sp = sp; /* sp */ \
})
/* Forward declaration, a strange C thing */
struct task_struct;

/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);

/* Return the "wait channel": where a blocked task p is sleeping. */
unsigned long get_wchan(struct task_struct *p);
/*
 * cpu_relax() - hint that we are in a busy-wait loop.
 *
 * On ARMv6, or CPUs affected by erratum 754327, a plain compiler
 * barrier is not enough: issue a full memory barrier plus a run of
 * nops.  Everywhere else a compiler barrier suffices.
 */
#if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
#define cpu_relax() \
do { \
	smp_mb(); \
	__asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
} while (0)
#else
#define cpu_relax() barrier()
#endif
/*
 * task_pt_regs() - the user-mode pt_regs frame, which sits immediately
 * below THREAD_START_SP at the top of task p's kernel stack page.
 */
#define task_pt_regs(p) \
((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)

#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc	/* user-mode PC */
#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp	/* user-mode SP */
/*
 * __ALT_SMP_ASM(smp, up) - emit the SMP form of an instruction inline
 * and record its address in the ".alt.smp.init" section together with
 * the UP replacement, so the kernel can patch in the UP form when it
 * finds itself on a uniprocessor.  Without CONFIG_SMP the UP form is
 * emitted directly.
 */
#ifdef CONFIG_SMP
#define __ALT_SMP_ASM(smp, up) \
"9998: " smp "\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
" .long 9998b\n" \
" " up "\n" \
" .popsection\n"
#else
#define __ALT_SMP_ASM(smp, up) up
#endif
/*
 * Prefetching support - only ARMv5.
 */
#if __LINUX_ARM_ARCH__ >= 5
#define ARCH_HAS_PREFETCH
/* Hint the CPU to pull the cache line at ptr in for reading (pld). */
static inline void prefetch(const void *ptr)
{
	__asm__ __volatile__(
		"pld\t%a0"
		:: "p" (ptr));
}

#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define ARCH_HAS_PREFETCHW
/*
 * Prefetch for write (pldw): needs the ARMv7 multiprocessing
 * extensions, hence .arch_extension mp.  Via __ALT_SMP_ASM this
 * degrades to a plain pld when the kernel is patched for UP.
 */
static inline void prefetchw(const void *ptr)
{
	__asm__ __volatile__(
		".arch_extension mp\n"
		__ALT_SMP_ASM(
			WASM(pldw) "\t%a0",
			WASM(pld) "\t%a0"
		)
		:: "p" (ptr));
}
#endif
#endif
/* This arch supplies its own arch_pick_mmap_layout(). */
#define HAVE_ARCH_PICK_MMAP_LAYOUT

#endif	/* __KERNEL__ */

#endif /* __ASM_ARM_PROCESSOR_H */