// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>

#include <asm/cpu_entry_area.h>
#include <asm/pgtable.h>
#include <asm/fixmap.h>
#include <asm/desc.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
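/*
 * Per-CPU backing store for the IST exception stacks: N_EXCEPTION_STACKS - 1
 * stacks of EXCEPTION_STKSZ bytes each, plus the larger DEBUG_STKSZ sized
 * debug stack.  These get mapped into each CPU's entry area below.
 */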
static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif
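
/*
 * Translate a CPU number into the fixed virtual address of that CPU's
 * cpu_entry_area.  The areas live back to back starting at
 * CPU_ENTRY_AREA_PER_CPU, one CPU_ENTRY_AREA_SIZE slot per CPU; e.g.
 * cpu 2 lives at CPU_ENTRY_AREA_PER_CPU + 2 * CPU_ENTRY_AREA_SIZE.
 */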
struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);
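
/*
 * Install a single PTE mapping the physical address @pa with protection
 * @flags at the cpu_entry_area virtual address @cea_vaddr.
 */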
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}
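
/*
 * Map @pages pages of the per-CPU memory at @ptr into the cpu_entry_area
 * starting at @cea_vaddr, one PTE per page.
 */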
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
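
/*
 * Map the per-CPU debug store (used by Intel BTS/PEBS) into the entry
 * area.  The actual sampling buffers are allocated later, so their pages
 * only get non-present placeholder PTEs here to pre-populate the PMDs.
 */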
static void percpu_setup_debug_store(int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

/* Setup the fixmap mappings only once per-processor */
static void __init setup_cpu_entry_area(int cpu)
{
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

	cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
		    gdt_prot);

	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
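	/*
	 * The XOR below leaves page-number bits set iff offsetof() and
	 * offsetofend() of x86_tss fall in different pages, i.e. iff the
	 * hardware-visible part of the TSS would cross a page boundary.
	 */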
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
			     &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
#endif

#ifdef CONFIG_X86_64
	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
	BUILD_BUG_ON(sizeof(exception_stacks) !=
		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
	cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
			     &per_cpu(exception_stacks, cpu),
			     sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
#endif
	percpu_setup_debug_store(cpu);
}
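
/*
 * 32-bit only: pre-allocate the page tables (one PTE page per PMD) for
 * the whole cpu entry area range up front, so that the top-level entries
 * are in place before they are copied to initial_page_table.
 */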
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}
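
/* Boot-time entry point: map the entry area for each possible CPU. */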
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}