@@ -114,6 +114,7 @@ int do_set_thread_area(struct task_struct *p, int idx,
 		       int can_allocate)
 {
 	struct user_desc info;
+	unsigned short __maybe_unused sel, modified_sel;
 
 	if (copy_from_user(&info, u_info, sizeof(info)))
 		return -EFAULT;
@@ -141,6 +142,47 @@ int do_set_thread_area(struct task_struct *p, int idx,
 
 	set_tls_desc(p, idx, &info, 1);
 
+	/*
+	 * If DS, ES, FS, or GS points to the modified segment, forcibly
+	 * refresh it. Only needed on x86_64 because x86_32 reloads them
+	 * on return to user mode.
+	 */
+	modified_sel = (idx << 3) | 3;
+
+	if (p == current) {
+#ifdef CONFIG_X86_64
+		savesegment(ds, sel);
+		if (sel == modified_sel)
+			loadsegment(ds, sel);
+
+		savesegment(es, sel);
+		if (sel == modified_sel)
+			loadsegment(es, sel);
+
+		savesegment(fs, sel);
+		if (sel == modified_sel)
+			loadsegment(fs, sel);
+
+		savesegment(gs, sel);
+		if (sel == modified_sel)
+			load_gs_index(sel);
+#endif
+
+#ifdef CONFIG_X86_32_LAZY_GS
+		savesegment(gs, sel);
+		if (sel == modified_sel)
+			loadsegment(gs, sel);
+#endif
+	} else {
+#ifdef CONFIG_X86_64
+		if (p->thread.fsindex == modified_sel)
+			p->thread.fsbase = info.base_addr;
+
+		if (p->thread.gsindex == modified_sel)
+			p->thread.gsbase = info.base_addr;
+#endif
+	}
+
 	return 0;
 }
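For context, here is a minimal user-space sketch (not part of the patch; the descriptor field values are illustrative) showing how a GDT TLS slot maps to the selector the patch computes as modified_sel = (idx << 3) | 3, and how loading that selector into %fs sets up exactly the situation the patch must handle: a later set_thread_area() on the same slot while a segment register still references it.

/*
 * Hypothetical demo, not from the patch: allocate a GDT TLS slot,
 * derive its selector, and point %fs at it.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>		/* struct user_desc */

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;		/* ask the kernel for a free TLS slot */
	desc.base_addr = 0x1000;	/* arbitrary demo base */
	desc.limit = 0xfffff;
	desc.seg_32bit = 1;
	desc.limit_in_pages = 1;
	desc.useable = 1;

	if (syscall(SYS_set_thread_area, &desc) != 0) {
		perror("set_thread_area");
		return 1;
	}

	/*
	 * Same computation as modified_sel in the patch:
	 * GDT index in bits 3..15, TI = 0 (GDT), RPL = 3.
	 */
	unsigned short sel = (desc.entry_number << 3) | 3;
	printf("TLS slot %u -> selector %#hx\n", desc.entry_number, sel);

	/*
	 * Point %fs at the new segment. A second set_thread_area() call
	 * on this slot would now need the refresh the patch adds, since
	 * the CPU's cached descriptor in %fs would otherwise go stale.
	 */
	asm volatile("mov %0, %%fs" : : "r" (sel));
	return 0;
}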