tls.c

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/user.h>
#include <linux/regset.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/proto.h>

#include "tls.h"

/*
 * get_free_idx: find a yet-unused TLS descriptor index.
 */
static int get_free_idx(void)
{
        struct thread_struct *t = &current->thread;
        int idx;

        for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
                if (desc_empty(&t->tls_array[idx]))
                        return idx + GDT_ENTRY_TLS_MIN;
        return -ESRCH;
}

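/*
 * Sanity-check a descriptor supplied by userspace before it is allowed
 * into the TLS area of the GDT.
 */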
static bool tls_desc_okay(const struct user_desc *info)
{
        if (LDT_empty(info))
                return true;

        /*
         * espfix is required for 16-bit data segments, but espfix
         * only works for LDT segments.
         */
        if (!info->seg_32bit)
                return false;

        /* Only allow data segments in the TLS array. */
        if (info->contents > 1)
                return false;

        /*
         * Non-present segments with DPL 3 present an interesting attack
         * surface.  The kernel should handle such segments correctly,
         * but TLS is very difficult to protect in a sandbox, so prevent
         * such segments from being created.
         *
         * If userspace needs to remove a TLS entry, it can still delete
         * it outright.
         */
        if (info->seg_not_present)
                return false;

        return true;
}

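/*
 * Write @n descriptors from @info into @p's TLS slots starting at GDT
 * index @idx, and reload the live GDT entries if @p is the current task.
 */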
static void set_tls_desc(struct task_struct *p, int idx,
                         const struct user_desc *info, int n)
{
        struct thread_struct *t = &p->thread;
        struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
        int cpu;

        /*
         * We must not get preempted while modifying the TLS.
         */
        cpu = get_cpu();

        while (n-- > 0) {
                if (LDT_empty(info))
                        desc->a = desc->b = 0;
                else
                        fill_ldt(desc, info);
                ++info;
                ++desc;
        }

        if (t == &current->thread)
                load_TLS(t, cpu);

        put_cpu();
}

/*
 * Set a given TLS descriptor:
 */
int do_set_thread_area(struct task_struct *p, int idx,
                       struct user_desc __user *u_info,
                       int can_allocate)
{
        struct user_desc info;

        if (copy_from_user(&info, u_info, sizeof(info)))
                return -EFAULT;

        if (!tls_desc_okay(&info))
                return -EINVAL;

        if (idx == -1)
                idx = info.entry_number;

        /*
         * index -1 means the kernel should try to find and
         * allocate an empty descriptor:
         */
        if (idx == -1 && can_allocate) {
                idx = get_free_idx();
                if (idx < 0)
                        return idx;
                if (put_user(idx, &u_info->entry_number))
                        return -EFAULT;
        }

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        set_tls_desc(p, idx, &info, 1);

        return 0;
}

SYSCALL_DEFINE1(set_thread_area, struct user_desc __user *, u_info)
{
        return do_set_thread_area(current, -1, u_info, 1);
}

/*
 * Get the current Thread-Local Storage area:
 */

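/*
 * Translate a hardware descriptor back into the user_desc layout that
 * userspace passed to set_thread_area() (the inverse of fill_ldt()).
 */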
static void fill_user_desc(struct user_desc *info, int idx,
                           const struct desc_struct *desc)
{
        memset(info, 0, sizeof(*info));
        info->entry_number = idx;
        info->base_addr = get_desc_base(desc);
        info->limit = get_desc_limit(desc);
        info->seg_32bit = desc->d;
        info->contents = desc->type >> 2;
        info->read_exec_only = !(desc->type & 2);
        info->limit_in_pages = desc->g;
        info->seg_not_present = !desc->p;
        info->useable = desc->avl;
#ifdef CONFIG_X86_64
        info->lm = desc->l;
#endif
}

int do_get_thread_area(struct task_struct *p, int idx,
                       struct user_desc __user *u_info)
{
        struct user_desc info;

        if (idx == -1 && get_user(idx, &u_info->entry_number))
                return -EFAULT;

        if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
                return -EINVAL;

        fill_user_desc(&info, idx,
                       &p->thread.tls_array[idx - GDT_ENTRY_TLS_MIN]);

        if (copy_to_user(u_info, &info, sizeof(info)))
                return -EFAULT;
        return 0;
}

SYSCALL_DEFINE1(get_thread_area, struct user_desc __user *, u_info)
{
        return do_get_thread_area(current, -1, u_info);
}

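/*
 * The regset_tls_* helpers below back the TLS register set exposed to
 * debuggers and core dump writers; this one reports the number of
 * entries up to and including the highest non-empty TLS slot.
 */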
int regset_tls_active(struct task_struct *target,
                      const struct user_regset *regset)
{
        struct thread_struct *t = &target->thread;
        int n = GDT_ENTRY_TLS_ENTRIES;
        while (n > 0 && desc_empty(&t->tls_array[n - 1]))
                --n;
        return n;
}

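/*
 * Copy TLS descriptors out to a kernel or user buffer.  @pos and @count
 * are byte offsets and must be multiples of sizeof(struct user_desc).
 */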
int regset_tls_get(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   void *kbuf, void __user *ubuf)
{
        const struct desc_struct *tls;

        if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
            (pos % sizeof(struct user_desc)) != 0 ||
            (count % sizeof(struct user_desc)) != 0)
                return -EINVAL;

        pos /= sizeof(struct user_desc);
        count /= sizeof(struct user_desc);

        tls = &target->thread.tls_array[pos];

        if (kbuf) {
                struct user_desc *info = kbuf;
                while (count-- > 0)
                        fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
                                       tls++);
        } else {
                struct user_desc __user *u_info = ubuf;
                while (count-- > 0) {
                        struct user_desc info;
                        fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
                        if (__copy_to_user(u_info++, &info, sizeof(info)))
                                return -EFAULT;
                }
        }

        return 0;
}

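/*
 * Install TLS descriptors from a kernel or user buffer.  Every incoming
 * descriptor is validated with tls_desc_okay() before any of them is
 * committed, so a bad entry cannot leave a partial update behind.
 */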
int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
                   unsigned int pos, unsigned int count,
                   const void *kbuf, const void __user *ubuf)
{
        struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
        const struct user_desc *info;
        int i;

        if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
            (pos % sizeof(struct user_desc)) != 0 ||
            (count % sizeof(struct user_desc)) != 0)
                return -EINVAL;

        if (kbuf)
                info = kbuf;
        else if (__copy_from_user(infobuf, ubuf, count))
                return -EFAULT;
        else
                info = infobuf;

        for (i = 0; i < count / sizeof(struct user_desc); i++)
                if (!tls_desc_okay(info + i))
                        return -EINVAL;

        set_tls_desc(target,
                     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
                     info, count / sizeof(struct user_desc));

        return 0;
}
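
/*
 * Usage sketch (not part of this file): userspace normally reaches
 * set_thread_area() through the raw syscall interface, since glibc does
 * not export a wrapper for it.  A minimal example, assuming a 32-bit x86
 * Linux target; install_tls() is a hypothetical helper name:
 *
 *      #include <asm/ldt.h>
 *      #include <string.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      static int install_tls(void *base, unsigned int limit_pages)
 *      {
 *              struct user_desc d;
 *
 *              memset(&d, 0, sizeof(d));
 *              d.entry_number = -1;            // let the kernel pick a free slot
 *              d.base_addr = (unsigned long)base;
 *              d.limit = limit_pages;
 *              d.seg_32bit = 1;
 *              d.limit_in_pages = 1;
 *              d.useable = 1;
 *
 *              if (syscall(SYS_set_thread_area, &d) != 0)
 *                      return -1;
 *              return d.entry_number;          // slot chosen by get_free_idx()
 *      }
 *
 * The returned entry number can then be turned into a segment selector,
 * (entry_number << 3) | 3, and loaded into %gs by the caller.
 */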