/*
 * tls.c — x86 Thread-Local Storage (TLS) segment management:
 * the set_thread_area/get_thread_area syscalls and the regset
 * (ptrace/coredump) backends for the per-thread GDT TLS entries.
 */
  1. #include <linux/kernel.h>
  2. #include <linux/errno.h>
  3. #include <linux/sched.h>
  4. #include <linux/user.h>
  5. #include <linux/regset.h>
  6. #include <linux/syscalls.h>
  7. #include <linux/uaccess.h>
  8. #include <asm/desc.h>
  9. #include <asm/ldt.h>
  10. #include <asm/processor.h>
  11. #include <asm/proto.h>
  12. #include "tls.h"
  13. /*
  14. * sys_alloc_thread_area: get a yet unused TLS descriptor index.
  15. */
  16. static int get_free_idx(void)
  17. {
  18. struct thread_struct *t = &current->thread;
  19. int idx;
  20. for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
  21. if (desc_empty(&t->tls_array[idx]))
  22. return idx + GDT_ENTRY_TLS_MIN;
  23. return -ESRCH;
  24. }
/*
 * Validate a user-supplied TLS descriptor. Returns true only for
 * descriptor layouts that may safely be installed in the TLS array:
 * empty/zeroed ("no segment") descriptors, or present 32-bit data
 * segments.
 */
static bool tls_desc_okay(const struct user_desc *info)
{
	/*
	 * For historical reasons (i.e. no one ever documented how any
	 * of the segmentation APIs work), user programs can and do
	 * assume that a struct user_desc that's all zeros except for
	 * entry_number means "no segment at all".  This never actually
	 * worked.  In fact, up to Linux 3.19, a struct user_desc like
	 * this would create a 16-bit read-write segment with base and
	 * limit both equal to zero.
	 *
	 * That was close enough to "no segment at all" until we
	 * hardened this function to disallow 16-bit TLS segments.  Fix
	 * it up by interpreting these zeroed segments the way that they
	 * were almost certainly intended to be interpreted.
	 *
	 * The correct way to ask for "no segment at all" is to specify
	 * a user_desc that satisfies LDT_empty.  To keep everything
	 * working, we accept both.
	 *
	 * Note that there's a similar kludge in modify_ldt -- look at
	 * the distinction between modes 1 and 0x11.
	 */
	if (LDT_empty(info) || LDT_zero(info))
		return true;

	/*
	 * espfix is required for 16-bit data segments, but espfix
	 * only works for LDT segments.
	 */
	if (!info->seg_32bit)
		return false;

	/* Only allow data segments in the TLS array. */
	if (info->contents > 1)
		return false;

	/*
	 * Non-present segments with DPL 3 present an interesting attack
	 * surface.  The kernel should handle such segments correctly,
	 * but TLS is very difficult to protect in a sandbox, so prevent
	 * such segments from being created.
	 *
	 * If userspace needs to remove a TLS entry, it can still delete
	 * it outright.
	 */
	if (info->seg_not_present)
		return false;

	return true;
}
/*
 * Install @n descriptors from @info into @p's TLS array, starting at
 * GDT slot @idx.  If @p is the current task, also reload the live TLS
 * entries into this CPU's GDT so the change takes effect immediately.
 * Callers must have validated @info with tls_desc_okay() first.
 */
static void set_tls_desc(struct task_struct *p, int idx,
			 const struct user_desc *info, int n)
{
	struct thread_struct *t = &p->thread;
	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
	int cpu;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	while (n-- > 0) {
		if (LDT_empty(info) || LDT_zero(info)) {
			/* "No segment at all": clear both descriptor words. */
			desc->a = desc->b = 0;
		} else {
			fill_ldt(desc, info);

			/*
			 * Always set the accessed bit so that the CPU
			 * doesn't try to write to the (read-only) GDT.
			 */
			desc->type |= 1;
		}
		++info;
		++desc;
	}

	if (t == &current->thread)
		load_TLS(t, cpu);

	put_cpu();
}
/*
 * Set a given TLS descriptor in @p's TLS array.
 *
 * @idx: GDT slot to use, or -1 to take it from u_info->entry_number
 *       (which may itself be -1, requesting allocation of a free slot
 *       when @can_allocate is set; the chosen slot is written back to
 *       u_info->entry_number).
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL for an
 * unacceptable descriptor or out-of-range index, -ESRCH if no free
 * slot exists.
 */
int do_set_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info,
		       int can_allocate)
{
	struct user_desc info;
	unsigned short __maybe_unused sel, modified_sel;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	if (!tls_desc_okay(&info))
		return -EINVAL;

	if (idx == -1)
		idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1 && can_allocate) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		/* Report the allocated slot back to userspace. */
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	set_tls_desc(p, idx, &info, 1);

	/*
	 * If DS, ES, FS, or GS points to the modified segment, forcibly
	 * refresh it. Only needed on x86_64 because x86_32 reloads them
	 * on return to user mode.
	 */
	modified_sel = (idx << 3) | 3;	/* selector: index, TI=0, RPL=3 */

	if (p == current) {
#ifdef CONFIG_X86_64
		savesegment(ds, sel);
		if (sel == modified_sel)
			loadsegment(ds, sel);

		savesegment(es, sel);
		if (sel == modified_sel)
			loadsegment(es, sel);

		savesegment(fs, sel);
		if (sel == modified_sel)
			loadsegment(fs, sel);

		savesegment(gs, sel);
		if (sel == modified_sel)
			load_gs_index(sel);
#endif

#ifdef CONFIG_X86_32_LAZY_GS
		savesegment(gs, sel);
		if (sel == modified_sel)
			loadsegment(gs, sel);
#endif
	} else {
		/* Remote task: patch its cached fs/gs base instead. */
#ifdef CONFIG_X86_64
		if (p->thread.fsindex == modified_sel)
			p->thread.fsbase = info.base_addr;

		if (p->thread.gsindex == modified_sel)
			p->thread.gsbase = info.base_addr;
#endif
	}

	return 0;
}
/*
 * set_thread_area syscall: install a TLS descriptor for the current
 * task, allocating a free slot when u_info->entry_number is -1.
 */
SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info)
{
	return do_set_thread_area(current, -1, u_info, 1);
}
/*
 * Get the current Thread-Local Storage area:
 * translate a hardware desc_struct at GDT slot @idx back into the
 * user_desc layout used by the get_thread_area ABI.  Fields with no
 * hardware counterpart are zeroed by the memset.
 */
static void fill_user_desc(struct user_desc *info, int idx,
			   const struct desc_struct *desc)
{
	memset(info, 0, sizeof(*info));
	info->entry_number = idx;
	info->base_addr = get_desc_base(desc);
	info->limit = get_desc_limit(desc);
	info->seg_32bit = desc->d;
	/* Top two type bits: 0 = data, 1 = expand-down data. */
	info->contents = desc->type >> 2;
	/* Type bit 1 is the writable bit for data segments. */
	info->read_exec_only = !(desc->type & 2);
	info->limit_in_pages = desc->g;
	info->seg_not_present = !desc->p;
	info->useable = desc->avl;
#ifdef CONFIG_X86_64
	/* L bit: 64-bit segment. */
	info->lm = desc->l;
#endif
}
  189. int do_get_thread_area(struct task_struct *p, int idx,
  190. struct user_desc __user *u_info)
  191. {
  192. struct user_desc info;
  193. if (idx == -1 && get_user(idx, &u_info->entry_number))
  194. return -EFAULT;
  195. if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
  196. return -EINVAL;
  197. fill_user_desc(&info, idx,
  198. &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);
  199. if (copy_to_user(u_info, &info, sizeof(info)))
  200. return -EFAULT;
  201. return 0;
  202. }
/*
 * get_thread_area syscall: read back the current task's TLS descriptor
 * selected by u_info->entry_number.
 */
SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info)
{
	return do_get_thread_area(current, -1, u_info);
}
  207. int regset_tls_active(struct task_struct *target,
  208. const struct user_regset *regset)
  209. {
  210. struct thread_struct *t = &target->thread;
  211. int n = GDT_ENTRY_TLS_ENTRIES;
  212. while (n > 0 && desc_empty(&t->tls_array[n - 1]))
  213. --n;
  214. return n;
  215. }
  216. int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
  217. unsigned int pos, unsigned int count,
  218. void *kbuf, void __user *ubuf)
  219. {
  220. const struct desc_struct *tls;
  221. if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
  222. (pos % sizeof(struct user_desc)) != 0 ||
  223. (count % sizeof(struct user_desc)) != 0)
  224. return -EINVAL;
  225. pos /= sizeof(struct user_desc);
  226. count /= sizeof(struct user_desc);
  227. tls = &target->thread.tls_array[pos];
  228. if (kbuf) {
  229. struct user_desc *info = kbuf;
  230. while (count-- > 0)
  231. fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
  232. tls++);
  233. } else {
  234. struct user_desc __user *u_info = ubuf;
  235. while (count-- > 0) {
  236. struct user_desc info;
  237. fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
  238. if (__copy_to_user(u_info++, &info, sizeof(info)))
  239. return -EFAULT;
  240. }
  241. }
  242. return 0;
  243. }
  244. int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
  245. unsigned int pos, unsigned int count,
  246. const void *kbuf, const void __user *ubuf)
  247. {
  248. struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
  249. const struct user_desc *info;
  250. int i;
  251. if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
  252. (pos % sizeof(struct user_desc)) != 0 ||
  253. (count % sizeof(struct user_desc)) != 0)
  254. return -EINVAL;
  255. if (kbuf)
  256. info = kbuf;
  257. else if (__copy_from_user(infobuf, ubuf, count))
  258. return -EFAULT;
  259. else
  260. info = infobuf;
  261. for (i = 0; i < count / sizeof(struct user_desc); i++)
  262. if (!tls_desc_okay(info + i))
  263. return -EINVAL;
  264. set_tls_desc(target,
  265. GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
  266. info, count / sizeof(struct user_desc));
  267. return 0;
  268. }