@@ -99,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt)
 	/*
 	 * segment registers
 	 */
-#ifdef CONFIG_X86_32
-	savesegment(es, ctxt->es);
-	savesegment(fs, ctxt->fs);
+#ifdef CONFIG_X86_32_LAZY_GS
 	savesegment(gs, ctxt->gs);
-	savesegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
-	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
-	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
-	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
-	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+#endif
+#ifdef CONFIG_X86_64
+	savesegment(gs, ctxt->gs);
+	savesegment(fs, ctxt->fs);
+	savesegment(ds, ctxt->ds);
+	savesegment(es, ctxt->es);

 	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
-	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
-	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
 	mtrr_save_fixed_ranges(NULL);

 	rdmsrl(MSR_EFER, ctxt->efer);
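For reference, the savesegment()/loadsegment() pairs that replace the open-coded asm above come from arch/x86/include/asm/segment.h. A simplified sketch of what they expand to follows; the in-tree loadsegment() is more involved, carrying an exception-table fixup so that a bad selector is zeroed instead of faulting:

	/* Simplified sketch of the real macros in arch/x86/include/asm/segment.h. */
	#define savesegment(seg, value) \
		asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

	/*
	 * Sketch only: the in-tree version wraps this mov with exception-table
	 * entries so that loading an invalid selector silently loads 0 rather
	 * than killing the kernel.
	 */
	#define loadsegment(seg, value) \
		asm volatile("mov %0,%%" #seg : : "rm" (value) : "memory")

Routing the saves through savesegment() keeps the save path uniform across 32-bit and 64-bit instead of five hand-written movw sequences.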
@@ -189,9 +185,12 @@ static void fix_processor_context(void)
 }

 /**
- *	__restore_processor_state - restore the contents of CPU registers saved
- *		by __save_processor_state()
- *	@ctxt - structure to load the registers contents from
+ * __restore_processor_state - restore the contents of CPU registers saved
+ *                             by __save_processor_state()
+ * @ctxt - structure to load the registers contents from
+ *
+ * The asm code that gets us here will have restored a usable GDT, although
+ * it will be pointing to the wrong alias.
  */
 static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
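The "wrong alias" wording refers to the per-CPU GDT being mapped at two addresses since the read-only GDT work: a writable mapping in the direct map and a read-only fixmap mapping. The resume asm leaves us on the writable alias; fix_processor_context() moves us back to the read-only one. Roughly, from arch/x86/include/asm/desc.h in kernels of this vintage (a sketch; the exact helpers may differ across versions):

	static inline void load_direct_gdt(int cpu)
	{
		struct desc_ptr gdt_descr;

		gdt_descr.address = (long)get_cpu_gdt_rw(cpu);	/* writable alias */
		gdt_descr.size = GDT_SIZE - 1;
		load_gdt(&gdt_descr);
	}

	static inline void load_fixmap_gdt(int cpu)
	{
		struct desc_ptr gdt_descr;

		gdt_descr.address = (long)get_cpu_gdt_ro(cpu);	/* read-only alias */
		gdt_descr.size = GDT_SIZE - 1;
		load_gdt(&gdt_descr);
	}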
@@ -214,46 +213,50 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	write_cr2(ctxt->cr2);
 	write_cr0(ctxt->cr0);

+	/* Restore the IDT. */
+	load_idt(&ctxt->idt);
+
 	/*
-	 * now restore the descriptor tables to their proper values
-	 * ltr is done i fix_processor_context().
+	 * Just in case the asm code got us here with the SS, DS, or ES
+	 * out of sync with the GDT, update them.
 	 */
-	load_idt(&ctxt->idt);
+	loadsegment(ss, __KERNEL_DS);
+	loadsegment(ds, __USER_DS);
+	loadsegment(es, __USER_DS);

-#ifdef CONFIG_X86_64
 	/*
-	 * We need GSBASE restored before percpu access can work.
-	 * percpu access can happen in exception handlers or in complicated
-	 * helpers like load_gs_index().
+	 * Restore percpu access.  Percpu access can happen in exception
+	 * handlers or in complicated helpers like load_gs_index().
 	 */
-	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
+#ifdef CONFIG_X86_64
+	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
+#else
+	loadsegment(fs, __KERNEL_PERCPU);
+	loadsegment(gs, __KERNEL_STACK_CANARY);
 #endif

+	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
 	fix_processor_context();

 	/*
-	 * Restore segment registers.  This happens after restoring the GDT
-	 * and LDT, which happen in fix_processor_context().
+	 * Now that we have descriptor tables fully restored and working
+	 * exception handling, restore the usermode segments.
 	 */
-#ifdef CONFIG_X86_32
+#ifdef CONFIG_X86_64
+	loadsegment(ds, ctxt->ds);
 	loadsegment(es, ctxt->es);
 	loadsegment(fs, ctxt->fs);
-	loadsegment(gs, ctxt->gs);
-	loadsegment(ss, ctxt->ss);
-#else
-/* CONFIG_X86_64 */
-	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
-	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
-	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
 	load_gs_index(ctxt->gs);
-	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

 	/*
-	 * Restore FSBASE and user GSBASE after reloading the respective
-	 * segment selectors.
+	 * Restore FSBASE and GSBASE after restoring the selectors, since
+	 * restoring the selectors clobbers the bases.  Keep in mind
+	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
 	 */
 	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
-	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
+	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
+#elif defined(CONFIG_X86_32_LAZY_GS)
+	loadsegment(gs, ctxt->gs);
 #endif

 	do_fpu_end();
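A closing note on the "horribly misnamed" remark: while the kernel is running, MSR_GS_BASE holds the kernel's percpu base (the active GSBASE), and MSR_KERNEL_GS_BASE holds the inactive base that SWAPGS will swap in on return to usermode, i.e. the usermode GSBASE. The kernelmode_gs_base/usermode_gs_base field names encode what the values mean rather than which MSR they happened to come from. A minimal model of the SWAPGS invariant (gs_state and swapgs_model are illustrative names, not kernel code):

	/*
	 * Illustrative model, not kernel code.  While the kernel runs:
	 *   MSR_GS_BASE        = kernel percpu base   (active)
	 *   MSR_KERNEL_GS_BASE = usermode GS base     (inactive, despite the name)
	 * SWAPGS exchanges the two on every kernel entry and exit.
	 */
	struct gs_state {
		unsigned long active;	/* MSR_GS_BASE */
		unsigned long inactive;	/* MSR_KERNEL_GS_BASE */
	};

	static void swapgs_model(struct gs_state *gs)
	{
		unsigned long tmp = gs->active;

		gs->active = gs->inactive;	/* this exchange is all SWAPGS does */
		gs->inactive = tmp;
	}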