// SPDX-License-Identifier: GPL-2.0
#include <linux/spinlock.h>
#include <linux/percpu.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif
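
/*
 * Return the virtual address of @cpu's entry area within the
 * CPU_ENTRY_AREA_PER_CPU region.
 */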
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
        unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
        BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

        return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);
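
/*
 * Install a single PTE in the cpu_entry_area: map @cea_vaddr to the
 * physical address @pa with protection @flags.
 */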
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
        unsigned long va = (unsigned long) cea_vaddr;
        pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

        /*
         * The cpu_entry_area is shared between the user and kernel
         * page tables.  All of its ptes can safely be global.
         * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
         * non-present PTEs, so be careful not to set it in that
         * case to avoid confusion.
         */
        if (boot_cpu_has(X86_FEATURE_PGE) &&
            (pgprot_val(flags) & _PAGE_PRESENT))
                pte = pte_set_flags(pte, _PAGE_GLOBAL);

        set_pte_vaddr(va, pte);
}
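
/*
 * Map @pages pages of the per-CPU object @ptr into the entry area,
 * starting at @cea_vaddr.
 */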
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
        for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
                cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
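
/*
 * Map the per-CPU debug store (used by the PEBS and BTS performance
 * monitoring features) into the entry area.  Only Intel CPUs have a
 * debug store, so this is a no-op for other vendors.
 */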
static void percpu_setup_debug_store(int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
        int npages;
        void *cea;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return;

        cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
        npages = sizeof(struct debug_store) / PAGE_SIZE;
        BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
        cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
                             PAGE_KERNEL);

        cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
        /*
         * Force the population of PMDs for not yet allocated per cpu
         * memory like debug store buffers.
         */
        npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
        for (; npages; npages--, cea += PAGE_SIZE)
                cea_set_pte(cea, 0, PAGE_NONE);
#endif
}
/* Set up the entry area mappings; this runs only once per processor. */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
        extern char _entry_trampoline[];

        /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
        pgprot_t gdt_prot = PAGE_KERNEL_RO;
        pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
        /*
         * On native 32-bit systems, the GDT cannot be read-only because
         * our double fault handler uses a task gate, and entering through
         * a task gate needs to change an available TSS to busy.  If the
         * GDT is read-only, that will triple fault.  The TSS cannot be
         * read-only because the CPU writes to it on task switches.
         *
         * On Xen PV, the GDT must be read-only because the hypervisor
         * requires it.
         */
        pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
                PAGE_KERNEL_RO : PAGE_KERNEL;
        pgprot_t tss_prot = PAGE_KERNEL;
#endif
        cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
                    gdt_prot);

        cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
                             per_cpu_ptr(&entry_stack_storage, cpu), 1,
                             PAGE_KERNEL);
        /*
         * The Intel SDM says (Volume 3, 7.2.1):
         *
         *  Avoid placing a page boundary in the part of the TSS that the
         *  processor reads during a task switch (the first 104 bytes). The
         *  processor may not correctly perform address translations if a
         *  boundary occurs in this area. During a task switch, the processor
         *  reads and writes into the first 104 bytes of each TSS (using
         *  contiguous physical addresses beginning with the physical address
         *  of the first byte of the TSS). So, after TSS access begins, if
         *  part of the 104 bytes is not physically contiguous, the processor
         *  will access incorrect information without generating a page-fault
         *  exception.
         *
         * There are also a lot of errata involving the TSS spanning a page
         * boundary.  Assert that we're not doing that.
         */
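        /*
         * The XOR below is zero only when the first and last bytes of
         * x86_tss share the same page-number bits, i.e. when the hardware
         * TSS does not cross a page boundary.
         */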
        BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
                      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
        BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
        cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
                             &per_cpu(cpu_tss_rw, cpu),
                             sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
        per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
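        /*
         * Map the per-CPU IST exception stacks (double fault, NMI, debug,
         * machine check) into the entry area.
         */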
        BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
        BUILD_BUG_ON(sizeof(exception_stacks) !=
                     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
        cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
                             &per_cpu(exception_stacks, cpu),
                             sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
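
        /*
         * Map the syscall entry trampoline text read-only and executable.
         * Because the cpu_entry_area is present in both the kernel and the
         * user page tables, entry through the trampoline works even with
         * page-table isolation enabled.
         */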
        cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
                    __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
#endif

        percpu_setup_debug_store(cpu);
}
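
/*
 * On 32-bit, pre-populate the page tables covering the whole
 * cpu_entry_area range one PMD at a time, so that cea_set_pte()
 * only has to install leaf PTEs later.
 */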
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
        unsigned long start, end;

        BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
        BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

        start = CPU_ENTRY_AREA_BASE;
        end = start + CPU_ENTRY_AREA_MAP_SIZE;

        /* Careful here: start + PMD_SIZE might wrap around */
        for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
                populate_extra_pte(start);
#endif
}
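
/* Set up the entry areas for every possible CPU, once during early boot. */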
void __init setup_cpu_entry_areas(void)
{
        unsigned int cpu;

        setup_cpu_entry_area_ptes();

        for_each_possible_cpu(cpu)
                setup_cpu_entry_area(cpu);

        /*
         * This is the last essential update to swapper_pg_dir which needs
         * to be synchronized to initial_page_table on 32-bit.
         */
        sync_initial_page_table();
}