cpu_entry_area.h

// SPDX-License-Identifier: GPL-2.0

#ifndef _ASM_X86_CPU_ENTRY_AREA_H
#define _ASM_X86_CPU_ENTRY_AREA_H

#include <linux/percpu-defs.h>
#include <asm/processor.h>
#include <asm/intel_ds.h>
/*
 * cpu_entry_area is a percpu region that contains things needed by the CPU
 * and early entry/exit code. Real types aren't used for all fields here
 * to avoid circular header dependencies.
 *
 * Every field is a virtual alias of some other allocated backing store.
 * There is no direct allocation of a struct cpu_entry_area.
 */
struct cpu_entry_area {
        char gdt[PAGE_SIZE];

        /*
         * The GDT is just below entry_stack and thus serves (on x86_64) as
         * a read-only guard page.
         */
        struct entry_stack_page entry_stack_page;
        /*
         * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because
         * we need task switches to work, and task switches write to the TSS.
         */
        struct tss_struct tss;

        char entry_trampoline[PAGE_SIZE];
#ifdef CONFIG_X86_64
        /*
         * Exception stacks used for IST entries.
         *
         * In the future, this should have a separate slot for each stack
         * with guard pages between them.
         */
        char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
#endif
#ifdef CONFIG_CPU_SUP_INTEL
        /*
         * Per-CPU debug store for Intel performance monitoring. Wastes a
         * full page at the moment.
         */
        struct debug_store cpu_debug_store;
        /*
         * The actual PEBS/BTS buffers must be mapped to user space.
         * Reserve enough fixmap PTEs.
         */
        struct debug_store_buffers cpu_debug_buffers;
#endif
};
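
/*
 * Size of one CPU's entry area, and of the whole per-CPU array of them
 * that is mapped starting at CPU_ENTRY_AREA_PER_CPU (see below).
 */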
#define CPU_ENTRY_AREA_SIZE     (sizeof(struct cpu_entry_area))
#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
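
/* Per-CPU pointer to this CPU's entry area alias mapping. */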
DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);

extern void setup_cpu_entry_areas(void);
extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
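
/*
 * Layout of the cpu entry area mapping: a single read-only IDT page sits
 * at CPU_ENTRY_AREA_BASE, immediately followed by the per-CPU entry areas.
 */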
#define CPU_ENTRY_AREA_RO_IDT           CPU_ENTRY_AREA_BASE
#define CPU_ENTRY_AREA_PER_CPU          (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)

#define CPU_ENTRY_AREA_RO_IDT_VADDR     ((void *)CPU_ENTRY_AREA_RO_IDT)

#define CPU_ENTRY_AREA_MAP_SIZE                                 \
        (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
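
/* Translate a CPU number into its entry area alias mapping. */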
extern struct cpu_entry_area *get_cpu_entry_area(int cpu);

static inline struct entry_stack *cpu_entry_stack(int cpu)
{
        return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
}

#endif
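
For orientation, a minimal sketch of how kernel code might consume these helpers. The function example_show_entry_stack() and its pr_info() output are illustrative additions, not part of this header or of the kernel.

/*
 * Illustrative sketch only: example_show_entry_stack() is a hypothetical
 * caller, shown to demonstrate the lookup helpers above.
 */
#include <linux/printk.h>
#include <asm/cpu_entry_area.h>

static void example_show_entry_stack(int cpu)
{
        /* Resolve the given CPU's alias mapping and its entry stack. */
        struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
        struct entry_stack *stack = cpu_entry_stack(cpu);

        pr_info("CPU%d: cpu_entry_area %p, entry stack %p\n",
                cpu, cea, stack);
}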