/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
#include <asm/fsgsbase.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

#include "process.h"

/* Also prints some state that isn't saved in pt_regs. */
void __show_regs(struct pt_regs *regs, enum show_regs_mode mode)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	if (mode == SHOW_REGS_SHORT)
		return;

	if (mode == SHOW_REGS_USER) {
		rdmsrl(MSR_FS_BASE, fs);
		rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
		printk(KERN_DEFAULT "FS: %016lx GS: %016lx\n",
		       fs, shadowgs);
		return;
	}

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
	       es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
	       cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}
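
/*
 * Sanity check at task teardown: by the time a task is released its LDT
 * should already be gone, so a surviving LDT indicates a kernel bug.
 */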
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->nr_entries);
			BUG();
		}
#endif
	}
}

enum which_selector {
	FS,
	GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero. On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct. This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}
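
/*
 * Save the outgoing task's fs/gs selectors and, via save_base_legacy(),
 * record (or invalidate) the cached segment bases.
 */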
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}

#if IS_ENABLED(CONFIG_KVM)
/*
 * While a process is running, current->thread.fsbase and
 * current->thread.gsbase may not match the corresponding CPU registers (see
 * save_base_legacy()). KVM wants an efficient way to save and restore
 * FSBASE and GSBASE. When FSGSBASE extensions are enabled, this will have
 * to use RD{FS,GS}BASE.
 */
void save_fsgs_for_kvm(void)
{
	save_fsgs(current);
}
EXPORT_SYMBOL_GPL(save_fsgs_for_kvm);
#endif

static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}
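
/*
 * Restore FS or GS for the incoming task on non-FSGSBASE hardware.  The
 * cases mirror save_base_legacy(): a selector <= 3 with a zero base (which
 * may need forced zeroing on X86_BUG_NULL_SEG CPUs), a selector <= 3 with a
 * nonzero base (selector load plus an MSR write), and a real segment (where
 * the selector load alone suffices).
 */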
static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that
				 * matters for performance: if both the
				 * previous and next states are fully zeroed,
				 * we can skip the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives. This is the case on
				 * Intel-style CPUs.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment. Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}

static __always_inline void x86_fsgsbase_load(struct thread_struct *prev,
					      struct thread_struct *next)
{
	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);
}
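
/*
 * Look up the base address behind a nonzero selector: in the GDT only the
 * TLS slots can carry a nonzero base, so anything else comes from the LDT.
 */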
static unsigned long x86_fsgsbase_read_task(struct task_struct *task,
					    unsigned short selector)
{
	unsigned short idx = selector >> 3;
	unsigned long base;

	if (likely((selector & SEGMENT_TI_MASK) == 0)) {
		if (unlikely(idx >= GDT_ENTRIES))
			return 0;

		/*
		 * There are no user segments in the GDT with nonzero bases
		 * other than the TLS segments.
		 */
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			return 0;

		idx -= GDT_ENTRY_TLS_MIN;
		base = get_desc_base(&task->thread.tls_array[idx]);
	} else {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		struct ldt_struct *ldt;

		/*
		 * If performance here mattered, we could protect the LDT
		 * with RCU. This is a slow path, though, so we can just
		 * take the mutex.
		 */
		mutex_lock(&task->mm->context.lock);
		ldt = task->mm->context.ldt;
		if (unlikely(!ldt || idx >= ldt->nr_entries))
			base = 0;
		else
			base = get_desc_base(ldt->entries + idx);
		mutex_unlock(&task->mm->context.lock);
#else
		base = 0;
#endif
	}

	return base;
}

void x86_fsbase_write_cpu(unsigned long fsbase)
{
	/*
	 * Set the selector to 0 to signal that the segment base has been
	 * overwritten; the context-switch code checks for this when deciding
	 * whether the segment load can be skipped.
	 */
	loadseg(FS, 0);
	wrmsrl(MSR_FS_BASE, fsbase);
}

void x86_gsbase_write_cpu_inactive(unsigned long gsbase)
{
	/* Set the selector to 0 for the same reason as %fs above. */
	loadseg(GS, 0);
	wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
}

unsigned long x86_fsbase_read_task(struct task_struct *task)
{
	unsigned long fsbase;

	if (task == current)
		fsbase = x86_fsbase_read_cpu();
	else if (task->thread.fsindex == 0)
		fsbase = task->thread.fsbase;
	else
		fsbase = x86_fsgsbase_read_task(task, task->thread.fsindex);

	return fsbase;
}

unsigned long x86_gsbase_read_task(struct task_struct *task)
{
	unsigned long gsbase;

	if (task == current)
		gsbase = x86_gsbase_read_cpu_inactive();
	else if (task->thread.gsindex == 0)
		gsbase = task->thread.gsbase;
	else
		gsbase = x86_fsgsbase_read_task(task, task->thread.gsindex);

	return gsbase;
}

int x86_fsbase_write_task(struct task_struct *task, unsigned long fsbase)
{
	/*
	 * Not strictly needed for %fs, but do it for symmetry
	 * with %gs.
	 */
	if (unlikely(fsbase >= TASK_SIZE_MAX))
		return -EPERM;

	preempt_disable();
	task->thread.fsbase = fsbase;
	if (task == current)
		x86_fsbase_write_cpu(fsbase);
	task->thread.fsindex = 0;
	preempt_enable();

	return 0;
}

int x86_gsbase_write_task(struct task_struct *task, unsigned long gsbase)
{
	if (unlikely(gsbase >= TASK_SIZE_MAX))
		return -EPERM;

	preempt_disable();
	task->thread.gsbase = gsbase;
	if (task == current)
		x86_gsbase_write_cpu_inactive(gsbase);
	task->thread.gsindex = 0;
	preempt_enable();

	return 0;
}
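
/*
 * Set up the child's thread state and kernel stack so that the first
 * switch to it resumes in ret_from_fork.  For kernel threads, frame->bx
 * holds the function to call and frame->r12 its argument.
 */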
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
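
/*
 * Reset segment and register state for a fresh execve().  On
 * X86_BUG_NULL_SEG CPUs the fs/gs bases are explicitly scrubbed first,
 * since loading a zero selector there does not clear the base.
 */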
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip = new_ip;
	regs->sp = new_sp;
	regs->cs = _cs;
	regs->ss = _ss;
	regs->flags = X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
EXPORT_SYMBOL_GPL(start_thread);

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT. The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	x86_fsgsbase_load(prev, next);

	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);

#ifdef CONFIG_XEN_PV
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL. We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor. As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths. Instead, we ensure that SS is never NULL in
		 * system call context. We do this by replacing NULL SS
		 * selectors at every context switch. SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt. Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes. Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;

		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	intel_rdt_sched_in();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: overwrites user setup. Should have two bits.
	 * But 64bit processes have always behaved this way,
	 * so it's not too bad. The main problem is just that
	 * 32bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_32bit_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status. The x86 mmap() code relies on
	 * the syscall bitness, so set the x32 syscall bit right here to
	 * make in_32bit_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif
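
/*
 * Backend for arch_prctl(2): FS/GS base get/set, plus vDSO mapping when
 * checkpoint/restore support is configured.
 */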
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;

	switch (option) {
	case ARCH_SET_GS: {
		ret = x86_gsbase_write_task(task, arg2);
		break;
	}
	case ARCH_SET_FS: {
		ret = x86_fsbase_write_task(task, arg2);
		break;
	}
	case ARCH_GET_FS: {
		unsigned long base = x86_fsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base = x86_gsbase_read_task(task);

		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
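
/*
 * Syscall entry points.  As an illustration (not part of this file), a
 * userspace thread library would typically reach the code above via, e.g.,
 * syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_area) to point
 * %fs at its TLS block.
 */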
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif

unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}