user.c

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
        .uid_map = {
                .nr_extents = 1,
                .extent[0] = {
                        .first = 0,
                        .lower_first = 0,
                        .count = 4294967295U,
                },
        },
        .gid_map = {
                .nr_extents = 1,
                .extent[0] = {
                        .first = 0,
                        .lower_first = 0,
                        .count = 4294967295U,
                },
        },
        .projid_map = {
                .nr_extents = 1,
                .extent[0] = {
                        .first = 0,
                        .lower_first = 0,
                        .count = 4294967295U,
                },
        },
        .count = ATOMIC_INIT(3),
        .owner = GLOBAL_ROOT_UID,
        .group = GLOBAL_ROOT_GID,
        .ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
        .ns.ops = &userns_operations,
#endif
        .flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_PERSISTENT_KEYRINGS
        .persistent_keyring_register_sem =
                __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
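
/*
 * Editorial note (not in the original file): each map above is a single
 * identity extent, covering IDs 0 through 4294967294 (a count of
 * 4294967295U starting at 0).  In the initial namespace a kernel ID
 * therefore maps to the same numeric value in userspace; (uid_t)-1 is
 * deliberately left out of the range, since it serves as the invalid ID.
 * A minimal sketch of the lookup an extent implies:
 *
 *      // an id in [first, first + count) maps to
 *      // lower_first + (id - first); with first == lower_first == 0
 *      // this is the identity over the whole valid range.
 */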

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS    (CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ      (1 << UIDHASH_BITS)
#define UIDHASH_MASK    (UIDHASH_SZ - 1)
#define __uidhashfn(uid)        (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)       (uidhash_table + __uidhashfn((__kuid_val(uid))))
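
/*
 * Worked example (editorial, not in the original): with the default
 * UIDHASH_BITS of 7 there are 128 buckets.  For uid 1000:
 *
 *      (1000 >> 7) = 7;  (7 + 1000) & 127 = 1007 & 127 = 111
 *
 * so the user_struct for uid 1000 hangs off uidhash_table[111].
 * Folding the high bits in before masking keeps uids that differ only
 * above the mask (0, 128, 256, ...) from piling into one bucket, while
 * the whole hash stays a couple of cheap ALU operations.
 */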

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
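
/*
 * Illustration (editorial): the failure mode the comment above rules
 * out.  If free_uid() hypothetically used the _bh lock variants:
 *
 *      local_irq_save(flags);                  // done by some callers
 *      spin_lock_bh(&uidhash_lock);
 *      ...
 *      spin_unlock_bh(&uidhash_lock);          // -> local_bh_enable()
 *
 * local_bh_enable() may run pending softirqs right there, and softirq
 * handlers are free to re-enable interrupts - breaking the caller's
 * assumption that IRQs stay off.  Hence the irqsave/irqrestore variants
 * used throughout this file.
 */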

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
        .__count = ATOMIC_INIT(1),
        .processes = ATOMIC_INIT(1),
        .sigpending = ATOMIC_INIT(0),
        .locked_shm = 0,
        .uid = GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
        hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
        hlist_del_init(&up->uidhash_node);
}
static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
        struct user_struct *user;

        hlist_for_each_entry(user, hashent, uidhash_node) {
                if (uid_eq(user->uid, uid)) {
                        atomic_inc(&user->__count);
                        return user;
                }
        }

        return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
        __releases(&uidhash_lock)
{
        uid_hash_remove(up);
        spin_unlock_irqrestore(&uidhash_lock, flags);
        key_put(up->uid_keyring);
        key_put(up->session_keyring);
        kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.
 * The caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
        struct user_struct *ret;
        unsigned long flags;

        spin_lock_irqsave(&uidhash_lock, flags);
        ret = uid_hash_find(uid, uidhashentry(uid));
        spin_unlock_irqrestore(&uidhash_lock, flags);
        return ret;
}
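
/*
 * Usage sketch (editorial, hypothetical caller): the find/free pair is
 * meant to bracket any use of the returned structure:
 *
 *      struct user_struct *u = find_user(uid);
 *      if (u) {
 *              // safe to inspect u->processes etc. here
 *              free_uid(u);    // drop the ref find_user() took
 *      }
 */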

void free_uid(struct user_struct *up)
{
        unsigned long flags;

        if (!up)
                return;

        local_irq_save(flags);
        if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
                free_user(up, flags);
        else
                local_irq_restore(flags);
}
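
/*
 * Editorial note on atomic_dec_and_lock(): it decrements __count and,
 * only if the count hit zero, acquires uidhash_lock and returns true.
 * Roughly equivalent to the following, but without taking the lock on
 * the common nonzero path:
 *
 *      spin_lock(&uidhash_lock);
 *      if (atomic_dec_and_test(&up->__count))
 *              return true;    // locked; last reference gone
 *      spin_unlock(&uidhash_lock);
 *      return false;
 *
 * On the true branch, free_user() above unhashes and frees the entry
 * and is documented to release the lock and restore IRQ state itself.
 */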

struct user_struct *alloc_uid(kuid_t uid)
{
        struct hlist_head *hashent = uidhashentry(uid);
        struct user_struct *up, *new;

        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        spin_unlock_irq(&uidhash_lock);

        if (!up) {
                new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        return NULL;

                new->uid = uid;
                atomic_set(&new->__count, 1);

                /*
                 * Before adding this, check whether we raced
                 * on adding the same user already..
                 */
                spin_lock_irq(&uidhash_lock);
                up = uid_hash_find(uid, hashent);
                if (up) {
                        key_put(new->uid_keyring);
                        key_put(new->session_keyring);
                        kmem_cache_free(uid_cachep, new);
                } else {
                        uid_hash_insert(new, hashent);
                        up = new;
                }
                spin_unlock_irq(&uidhash_lock);
        }

        return up;
}
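
/*
 * Editorial note: alloc_uid() is the classic optimistic-allocation
 * pattern.  The first lookup drops the lock before the GFP_KERNEL
 * allocation (which may sleep), so a second lookup under the lock
 * re-checks for a concurrent insert of the same uid.  On the losing
 * side of the race, key_put() on the keyring fields of the zeroed,
 * never-published "new" is safe: key_put(NULL) is a no-op.
 */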

static int __init uid_cache_init(void)
{
        int n;

        uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        for (n = 0; n < UIDHASH_SZ; ++n)
                INIT_HLIST_HEAD(uidhash_table + n);

        /* Insert the root user immediately (init already runs as root) */
        spin_lock_irq(&uidhash_lock);
        uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
        spin_unlock_irq(&uidhash_lock);

        return 0;
}
subsys_initcall(uid_cache_init);
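
/*
 * Usage sketch (editorial, hypothetical caller): a setuid()-style path
 * is the typical client of this file, along the lines of:
 *
 *      struct user_struct *new_user = alloc_uid(kuid);
 *      if (!new_user)
 *              return -EAGAIN;         // allocation failed
 *      // ...point the new credentials at new_user...
 *      free_uid(old_user);             // drop the old user's reference
 *
 * Note also that SLAB_PANIC above means uid_cache_init() cannot fail
 * quietly: the kernel panics at boot if the cache cannot be created.
 */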