vma.c

/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <asm/mshyperv.h>
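
/*
 * vdso64_enabled below is the runtime switch for mapping the 64-bit vdso
 * into new processes; it can be cleared at boot via the "vdso=" command
 * line parameter handled by vdso_setup() near the bottom of this file.
 */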
#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

void __init init_vdso_image(const struct vdso_image *image)
{
        BUG_ON(image->size % PAGE_SIZE != 0);

        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
                                                image->alt_len));
}
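
/*
 * init_vdso_image() runs once at boot (from init_vdso() below) and applies
 * instruction alternatives to the kernel-resident copy of the blob, so a
 * process faulting the vdso in always sees already-patched text.
 */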

struct linux_binprm;

static int vdso_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;

        if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
                return VM_FAULT_SIGBUS;

        vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
        get_page(vmf->page);
        return 0;
}
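
/*
 * vdso_fault() backs the "[vdso]" special mapping with pages of the
 * kernel-resident image: accesses beyond image->size raise SIGBUS, and
 * get_page() takes a reference because the page is handed back to the
 * fault core in vmf->page.
 */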

static void vdso_fix_landing(const struct vdso_image *image,
                             struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        if (in_ia32_syscall() && image == &vdso_image_32) {
                struct pt_regs *regs = current_pt_regs();
                unsigned long vdso_land = image->sym_int80_landing_pad;
                unsigned long old_land_addr = vdso_land +
                        (unsigned long)current->mm->context.vdso;

                /* Fix the userspace landing address; see do_fast_syscall_32(). */
                if (regs->ip == old_land_addr)
                        regs->ip = new_vma->vm_start + vdso_land;
        }
#endif
}
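
/*
 * Without the fixup above, a 32-bit task that mremap()s its vdso while
 * sitting in the int80 landing pad would resume at the old, now stale
 * address; do_fast_syscall_32() is the other half of that handshake.
 */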

static int vdso_mremap(const struct vm_special_mapping *sm,
                       struct vm_area_struct *new_vma)
{
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
        const struct vdso_image *image = current->mm->context.vdso_image;

        if (image->size != new_size)
                return -EINVAL;

        if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
                return -EFAULT;

        vdso_fix_landing(image, new_vma);
        current->mm->context.vdso = (void __user *)new_vma->vm_start;

        return 0;
}

static int vvar_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;
        long sym_offset;
        int ret = -EFAULT;

        if (!image)
                return VM_FAULT_SIGBUS;

        sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
                image->sym_vvar_start;

        /*
         * Sanity check: a symbol offset of zero means that the page
         * does not exist for this vdso image, not that the page is at
         * offset zero relative to the text mapping. This should be
         * impossible here, because sym_offset should only be zero for
         * the page past the end of the vvar mapping.
         */
        if (sym_offset == 0)
                return VM_FAULT_SIGBUS;

        if (sym_offset == image->sym_vvar_page) {
                ret = vm_insert_pfn(vma, vmf->address,
                                    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
        } else if (sym_offset == image->sym_pvclock_page) {
                struct pvclock_vsyscall_time_info *pvti =
                        pvclock_pvti_cpu0_va();

                if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
                        ret = vm_insert_pfn(vma, vmf->address,
                                            __pa(pvti) >> PAGE_SHIFT);
                }
        } else if (sym_offset == image->sym_hvclock_page) {
                struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

                if (tsc_pg && vclock_was_used(VCLOCK_HVCLOCK))
                        ret = vm_insert_pfn(vma, vmf->address,
                                            vmalloc_to_pfn(tsc_pg));
        }

        if (ret == 0 || ret == -EBUSY)
                return VM_FAULT_NOPAGE;

        return VM_FAULT_SIGBUS;
}
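
/*
 * A -EBUSY return from vm_insert_pfn() means a racing fault already
 * populated the PTE, which is as good as success, so both 0 and -EBUSY
 * become VM_FAULT_NOPAGE above; anything else becomes SIGBUS.
 */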

static const struct vm_special_mapping vdso_mapping = {
        .name = "[vdso]",
        .fault = vdso_fault,
        .mremap = vdso_mremap,
};

static const struct vm_special_mapping vvar_mapping = {
        .name = "[vvar]",
        .fault = vvar_fault,
};

/*
 * Add vdso and vvar mappings to current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to map at free addr)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long text_start;
        int ret = 0;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        addr = get_unmapped_area(NULL, addr,
                                 image->size - image->sym_vvar_start, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        text_start = addr - image->sym_vvar_start;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       text_start,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        vma = _install_special_mapping(mm,
                                       addr,
                                       -image->sym_vvar_start,
                                       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
                                       VM_PFNMAP,
                                       &vvar_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                do_munmap(mm, text_start, image->size, NULL);
        } else {
                current->mm->context.vdso = (void __user *)text_start;
                current->mm->context.vdso_image = image;
        }

up_fail:
        up_write(&mm->mmap_sem);
        return ret;
}
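
/*
 * Resulting layout (derived from map_vdso() above; image->sym_vvar_start
 * is negative, so the vvar pages sit immediately below the text):
 *
 *   addr                      text_start = addr - sym_vvar_start
 *   |- [vvar], -sym_vvar_start bytes -|- [vdso], image->size bytes -|
 *
 * context.vdso records text_start, the base of the text mapping.
 */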

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset. This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top. This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
        unsigned long addr, end;
        unsigned offset;

        /*
         * Round up the start address. It can start out unaligned as a result
         * of stack start randomization.
         */
        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;

        if (end > start) {
                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }

        /*
         * Forcibly align the final address in case we have a hardware
         * issue that requires alignment for performance reasons.
         */
        addr = align_vdso_addr(addr);

        return addr;
}
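
/*
 * Worked example with illustrative numbers, assuming 4 KiB pages, 2 MiB
 * PMDs and 4-level paging (TASK_SIZE_MAX == 0x7ffffffff000):
 *
 *   start = 0x7ffffffde000, len = 0x3000
 *   end   = (start + len + PMD_SIZE - 1) & PMD_MASK = 0x800000000000,
 *           clamped to TASK_SIZE_MAX and reduced by len = 0x7fffffffc000
 *   addr  = start + (random offset in [0, 30] pages) << PAGE_SHIFT
 *
 * i.e. the vdso lands a random number of pages above the stack top while
 * staying inside the PMD covering the stack top.
 */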

static int map_vdso_randomized(const struct vdso_image *image)
{
        unsigned long addr = vdso_addr(current->mm->start_stack,
                                       image->size - image->sym_vvar_start);

        return map_vdso(image, addr);
}
#endif

int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        down_write(&mm->mmap_sem);
        /*
         * Check if we have already mapped the vdso blob - fail to prevent
         * userspace from abusing install_special_mapping(), which may not
         * handle accounting and rlimits correctly.
         * We could search the VMAs near context.vdso, but this is a slow
         * path, so explicitly check all VMAs to be completely sure.
         */
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma_is_special_mapping(vma, &vdso_mapping) ||
                    vma_is_special_mapping(vma, &vvar_mapping)) {
                        up_write(&mm->mmap_sem);
                        return -EEXIST;
                }
        }
        up_write(&mm->mmap_sem);

        return map_vdso(image, addr);
}
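
/*
 * map_vdso_once() is the backend for the ARCH_MAP_VDSO_* arch_prctl()
 * requests (via prctl_map_vdso() in process_64.c in kernels of this
 * vintage); that caller is named here only as context - it is not
 * visible in this file.
 */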

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        return map_vdso(&vdso_image_32, 0);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32)) {
                if (!vdso64_enabled)
                        return 0;

                return map_vdso_randomized(&vdso_image_x32);
        }
#endif
#ifdef CONFIG_IA32_EMULATION
        return load_vdso32();
#else
        return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return load_vdso32();
}
#endif

#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
        vdso64_enabled = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("vdso=", vdso_setup);
#endif
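
/*
 * Usage example: booting with "vdso=0" clears vdso64_enabled, so
 * arch_setup_additional_pages() above maps no 64-bit vdso into newly
 * exec'd processes.
 */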

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
        int cpu = smp_processor_id();
        struct desc_struct d = { };
        unsigned long node = 0;
#ifdef CONFIG_NUMA
        node = cpu_to_node(cpu);
#endif
        if (static_cpu_has(X86_FEATURE_RDTSCP))
                write_rdtscp_aux((node << 12) | cpu);

        /*
         * Store cpu number in limit so that it can be loaded
         * quickly in user space in vgetcpu. (12 bits for the CPU
         * and 8 bits for the node)
         */
        d.limit0 = cpu | ((node & 0xf) << 12);
        d.limit = node >> 4;
        d.type = 5;             /* RO data, expand down, accessed */
        d.dpl = 3;              /* Visible to user code */
        d.s = 1;                /* Not a system segment */
        d.p = 1;                /* Present */
        d.d = 1;                /* 32-bit */

        write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
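
/*
 * Sketch of the user-space consumer (it lives in the vdso, not in this
 * file): an LSL instruction on the per-CPU segment selector yields the
 * limit programmed above. Roughly, assuming the selector is
 * GDT_ENTRY_PER_CPU * 8 + 3 (RPL 3):
 *
 *      unsigned int p;
 *      asm("lsl %1, %0" : "=r" (p) : "r" (GDT_ENTRY_PER_CPU * 8 + 3));
 *      cpu  = p & 0xfff;
 *      node = p >> 12;
 */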

static int vgetcpu_online(unsigned int cpu)
{
        return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
}

static int __init init_vdso(void)
{
        init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
        init_vdso_image(&vdso_image_x32);
#endif

        /* notifier priority > KVM */
        return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
                                 "x86/vdso/vma:online", vgetcpu_online, NULL);
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */