/* kernel/user_namespace.c */
  1. /*
  2. * This program is free software; you can redistribute it and/or
  3. * modify it under the terms of the GNU General Public License as
  4. * published by the Free Software Foundation, version 2 of the
  5. * License.
  6. */
  7. #include <linux/export.h>
  8. #include <linux/nsproxy.h>
  9. #include <linux/slab.h>
  10. #include <linux/user_namespace.h>
  11. #include <linux/proc_ns.h>
  12. #include <linux/highuid.h>
  13. #include <linux/cred.h>
  14. #include <linux/securebits.h>
  15. #include <linux/keyctl.h>
  16. #include <linux/key-type.h>
  17. #include <keys/user-type.h>
  18. #include <linux/seq_file.h>
  19. #include <linux/fs.h>
  20. #include <linux/uaccess.h>
  21. #include <linux/ctype.h>
  22. #include <linux/projid.h>
  23. #include <linux/fs_struct.h>
/* Sysctl registration/teardown for a user namespace; implemented elsewhere. */
extern bool setup_userns_sysctls(struct user_namespace *ns);
extern void retire_userns_sysctls(struct user_namespace *ns);

/* Slab cache for struct user_namespace allocations. */
static struct kmem_cache *user_ns_cachep __read_mostly;
/* Serializes all writes to any id map and to ns->flags (setgroups state). */
static DEFINE_MUTEX(userns_state_mutex);

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *map);
static void free_user_ns(struct work_struct *work);
/*
 * Point @cred at @user_ns and reset its capability state: full
 * permitted/effective/bounding sets, empty inheritable and ambient sets,
 * default securebits.  The full sets are harmless because capabilities
 * are interpreted relative to the new user namespace.
 */
static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
	/* Start with the same capabilities as init but useless for doing
	 * anything as the capabilities are bound to the new user namespace.
	 */
	cred->securebits = SECUREBITS_DEFAULT;
	cred->cap_inheritable = CAP_EMPTY_SET;
	cred->cap_permitted = CAP_FULL_SET;
	cred->cap_effective = CAP_FULL_SET;
	cred->cap_ambient = CAP_EMPTY_SET;
	cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
	/* Drop any request_key() authorisation key held by the old creds. */
	key_put(cred->request_key_auth);
	cred->request_key_auth = NULL;
#endif
	/* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */
	cred->user_ns = user_ns;
}
/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 *
 * Returns 0 on success; -EUSERS when nested too deeply, -EPERM when the
 * policy checks fail, -ENOMEM (or the ns_alloc_inum() error) otherwise.
 */
int create_user_ns(struct cred *new)
{
	struct user_namespace *ns, *parent_ns = new->user_ns;
	kuid_t owner = new->euid;
	kgid_t group = new->egid;
	int ret;

	/* Bound the nesting depth of user namespaces. */
	if (parent_ns->level > 32)
		return -EUSERS;

	/*
	 * Verify that we can not violate the policy of which files
	 * may be accessed that is specified by the root directory,
	 * by verifying that the root directory is at the root of the
	 * mount namespace which allows all files to be accessed.
	 */
	if (current_chrooted())
		return -EPERM;

	/* The creator needs a mapping in the parent user namespace
	 * or else we won't be able to reasonably tell userspace who
	 * created a user_namespace.
	 */
	if (!kuid_has_mapping(parent_ns, owner) ||
	    !kgid_has_mapping(parent_ns, group))
		return -EPERM;

	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
	if (!ns)
		return -ENOMEM;

	ret = ns_alloc_inum(&ns->ns);
	if (ret) {
		kmem_cache_free(user_ns_cachep, ns);
		return ret;
	}
	ns->ns.ops = &userns_operations;

	atomic_set(&ns->count, 1);
	/* Leave the new->user_ns reference with the new user namespace. */
	ns->parent = parent_ns;
	ns->level = parent_ns->level + 1;
	ns->owner = owner;
	ns->group = group;
	INIT_WORK(&ns->work, free_user_ns);

	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
	mutex_lock(&userns_state_mutex);
	ns->flags = parent_ns->flags;
	mutex_unlock(&userns_state_mutex);

#ifdef CONFIG_PERSISTENT_KEYRINGS
	init_rwsem(&ns->persistent_keyring_register_sem);
#endif
	ret = -ENOMEM;
	if (!setup_userns_sysctls(ns))
		goto fail_keyring;

	set_cred_user_ns(new, ns);
	return 0;
fail_keyring:
	/* Unwind in reverse order of what succeeded above. */
#ifdef CONFIG_PERSISTENT_KEYRINGS
	key_put(ns->persistent_keyring_register);
#endif
	ns_free_inum(&ns->ns);
	kmem_cache_free(user_ns_cachep, ns);
	return ret;
}
  117. int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
  118. {
  119. struct cred *cred;
  120. int err = -ENOMEM;
  121. if (!(unshare_flags & CLONE_NEWUSER))
  122. return 0;
  123. cred = prepare_creds();
  124. if (cred) {
  125. err = create_user_ns(cred);
  126. if (err)
  127. put_cred(cred);
  128. else
  129. *new_cred = cred;
  130. }
  131. return err;
  132. }
/*
 * Deferred destructor, run from the workqueue via __put_user_ns().
 * Frees @ns and then walks up the parent chain, dropping one reference
 * per level; each parent whose count reaches zero is freed in the same
 * pass.  Iterative on purpose: a deep chain of namespaces must not
 * recurse.
 */
static void free_user_ns(struct work_struct *work)
{
	struct user_namespace *parent, *ns =
		container_of(work, struct user_namespace, work);

	do {
		parent = ns->parent;
		retire_userns_sysctls(ns);
#ifdef CONFIG_PERSISTENT_KEYRINGS
		key_put(ns->persistent_keyring_register);
#endif
		ns_free_inum(&ns->ns);
		kmem_cache_free(user_ns_cachep, ns);
		ns = parent;
		/* Continue only if dropping our ref freed the parent too. */
	} while (atomic_dec_and_test(&parent->count));
}
/*
 * Called when the last reference to @ns is dropped; destruction is
 * deferred to the workqueue (free_user_ns).
 */
void __put_user_ns(struct user_namespace *ns)
{
	schedule_work(&ns->work);
}
EXPORT_SYMBOL(__put_user_ns);
/*
 * Map the contiguous id range [@id, @id + @count - 1] down through @map.
 * The entire range must lie within a single extent; returns the mapped
 * value of @id on success, (u32) -1 otherwise.
 */
static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
	unsigned idx, extents;
	u32 first, last, id2;

	/* Last id of the requested range. */
	id2 = id + count - 1;

	/* Find the matching extent */
	extents = map->nr_extents;
	smp_rmb();	/* pairs with the smp_wmb() in map_write() */
	for (idx = 0; idx < extents; idx++) {
		first = map->extent[idx].first;
		last = first + map->extent[idx].count - 1;
		if (id >= first && id <= last &&
		    (id2 >= first && id2 <= last))
			break;
	}
	/* Map the id or note failure */
	if (idx < extents)
		id = (id - first) + map->extent[idx].lower_first;
	else
		id = (u32) -1;

	return id;
}
  175. static u32 map_id_down(struct uid_gid_map *map, u32 id)
  176. {
  177. unsigned idx, extents;
  178. u32 first, last;
  179. /* Find the matching extent */
  180. extents = map->nr_extents;
  181. smp_rmb();
  182. for (idx = 0; idx < extents; idx++) {
  183. first = map->extent[idx].first;
  184. last = first + map->extent[idx].count - 1;
  185. if (id >= first && id <= last)
  186. break;
  187. }
  188. /* Map the id or note failure */
  189. if (idx < extents)
  190. id = (id - first) + map->extent[idx].lower_first;
  191. else
  192. id = (u32) -1;
  193. return id;
  194. }
  195. static u32 map_id_up(struct uid_gid_map *map, u32 id)
  196. {
  197. unsigned idx, extents;
  198. u32 first, last;
  199. /* Find the matching extent */
  200. extents = map->nr_extents;
  201. smp_rmb();
  202. for (idx = 0; idx < extents; idx++) {
  203. first = map->extent[idx].lower_first;
  204. last = first + map->extent[idx].count - 1;
  205. if (id >= first && id <= last)
  206. break;
  207. }
  208. /* Map the id or note failure */
  209. if (idx < extents)
  210. id = (id - first) + map->extent[idx].first;
  211. else
  212. id = (u32) -1;
  213. return id;
  214. }
/**
 * make_kuid - Map a user-namespace uid pair into a kuid.
 * @ns: User namespace that the uid is in
 * @uid: User identifier
 *
 * Maps a user-namespace uid pair into a kernel internal kuid,
 * and returns that kuid.
 *
 * When there is no mapping defined for the user-namespace uid
 * pair INVALID_UID is returned.  Callers are expected to test
 * for and handle INVALID_UID being returned.  INVALID_UID
 * may be tested for using uid_valid().
 */
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
	/* Map the uid to a global kernel uid */
	return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);
/**
 * from_kuid - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
 */
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
	/* Map the uid from a global kernel uid */
	return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);
  252. /**
  253. * from_kuid_munged - Create a uid from a kuid user-namespace pair.
  254. * @targ: The user namespace we want a uid in.
  255. * @kuid: The kernel internal uid to start with.
  256. *
  257. * Map @kuid into the user-namespace specified by @targ and
  258. * return the resulting uid.
  259. *
  260. * There is always a mapping into the initial user_namespace.
  261. *
  262. * Unlike from_kuid from_kuid_munged never fails and always
  263. * returns a valid uid. This makes from_kuid_munged appropriate
  264. * for use in syscalls like stat and getuid where failing the
  265. * system call and failing to provide a valid uid are not an
  266. * options.
  267. *
  268. * If @kuid has no mapping in @targ overflowuid is returned.
  269. */
  270. uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
  271. {
  272. uid_t uid;
  273. uid = from_kuid(targ, kuid);
  274. if (uid == (uid_t) -1)
  275. uid = overflowuid;
  276. return uid;
  277. }
  278. EXPORT_SYMBOL(from_kuid_munged);
/**
 * make_kgid - Map a user-namespace gid pair into a kgid.
 * @ns: User namespace that the gid is in
 * @gid: group identifier
 *
 * Maps a user-namespace gid pair into a kernel internal kgid,
 * and returns that kgid.
 *
 * When there is no mapping defined for the user-namespace gid
 * pair INVALID_GID is returned.  Callers are expected to test
 * for and handle INVALID_GID being returned.  INVALID_GID may be
 * tested for using gid_valid().
 */
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
	/* Map the gid to a global kernel gid */
	return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);
/**
 * from_kgid - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
 */
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
	/* Map the gid from a global kernel gid */
	return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);
  316. /**
  317. * from_kgid_munged - Create a gid from a kgid user-namespace pair.
  318. * @targ: The user namespace we want a gid in.
  319. * @kgid: The kernel internal gid to start with.
  320. *
  321. * Map @kgid into the user-namespace specified by @targ and
  322. * return the resulting gid.
  323. *
  324. * There is always a mapping into the initial user_namespace.
  325. *
  326. * Unlike from_kgid from_kgid_munged never fails and always
  327. * returns a valid gid. This makes from_kgid_munged appropriate
  328. * for use in syscalls like stat and getgid where failing the
  329. * system call and failing to provide a valid gid are not options.
  330. *
  331. * If @kgid has no mapping in @targ overflowgid is returned.
  332. */
  333. gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
  334. {
  335. gid_t gid;
  336. gid = from_kgid(targ, kgid);
  337. if (gid == (gid_t) -1)
  338. gid = overflowgid;
  339. return gid;
  340. }
  341. EXPORT_SYMBOL(from_kgid_munged);
/**
 * make_kprojid - Map a user-namespace projid pair into a kprojid.
 * @ns: User namespace that the projid is in
 * @projid: Project identifier
 *
 * Maps a user-namespace projid pair into a kernel internal kprojid,
 * and returns that kprojid.
 *
 * When there is no mapping defined for the user-namespace projid
 * pair INVALID_PROJID is returned.  Callers are expected to test
 * for and handle INVALID_PROJID being returned.  INVALID_PROJID
 * may be tested for using projid_valid().
 */
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
	/* Map the projid to a global kernel projid */
	return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);
/**
 * from_kprojid - Create a projid from a kprojid user-namespace pair.
 * @targ: The user namespace we want a projid in.
 * @kprojid: The kernel internal project identifier to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 */
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
	/* Map the projid from a global kernel projid */
	return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);
  379. /**
  380. * from_kprojid_munged - Create a projiid from a kprojid user-namespace pair.
  381. * @targ: The user namespace we want a projid in.
  382. * @kprojid: The kernel internal projid to start with.
  383. *
  384. * Map @kprojid into the user-namespace specified by @targ and
  385. * return the resulting projid.
  386. *
  387. * There is always a mapping into the initial user_namespace.
  388. *
  389. * Unlike from_kprojid from_kprojid_munged never fails and always
  390. * returns a valid projid. This makes from_kprojid_munged
  391. * appropriate for use in syscalls like stat and where
  392. * failing the system call and failing to provide a valid projid are
  393. * not an options.
  394. *
  395. * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
  396. */
  397. projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
  398. {
  399. projid_t projid;
  400. projid = from_kprojid(targ, kprojid);
  401. if (projid == (projid_t) -1)
  402. projid = OVERFLOW_PROJID;
  403. return projid;
  404. }
  405. EXPORT_SYMBOL(from_kprojid_munged);
  406. static int uid_m_show(struct seq_file *seq, void *v)
  407. {
  408. struct user_namespace *ns = seq->private;
  409. struct uid_gid_extent *extent = v;
  410. struct user_namespace *lower_ns;
  411. uid_t lower;
  412. lower_ns = seq_user_ns(seq);
  413. if ((lower_ns == ns) && lower_ns->parent)
  414. lower_ns = lower_ns->parent;
  415. lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
  416. seq_printf(seq, "%10u %10u %10u\n",
  417. extent->first,
  418. lower,
  419. extent->count);
  420. return 0;
  421. }
  422. static int gid_m_show(struct seq_file *seq, void *v)
  423. {
  424. struct user_namespace *ns = seq->private;
  425. struct uid_gid_extent *extent = v;
  426. struct user_namespace *lower_ns;
  427. gid_t lower;
  428. lower_ns = seq_user_ns(seq);
  429. if ((lower_ns == ns) && lower_ns->parent)
  430. lower_ns = lower_ns->parent;
  431. lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
  432. seq_printf(seq, "%10u %10u %10u\n",
  433. extent->first,
  434. lower,
  435. extent->count);
  436. return 0;
  437. }
  438. static int projid_m_show(struct seq_file *seq, void *v)
  439. {
  440. struct user_namespace *ns = seq->private;
  441. struct uid_gid_extent *extent = v;
  442. struct user_namespace *lower_ns;
  443. projid_t lower;
  444. lower_ns = seq_user_ns(seq);
  445. if ((lower_ns == ns) && lower_ns->parent)
  446. lower_ns = lower_ns->parent;
  447. lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
  448. seq_printf(seq, "%10u %10u %10u\n",
  449. extent->first,
  450. lower,
  451. extent->count);
  452. return 0;
  453. }
  454. static void *m_start(struct seq_file *seq, loff_t *ppos,
  455. struct uid_gid_map *map)
  456. {
  457. struct uid_gid_extent *extent = NULL;
  458. loff_t pos = *ppos;
  459. if (pos < map->nr_extents)
  460. extent = &map->extent[pos];
  461. return extent;
  462. }
/* seq_file ->start callbacks, one per map file, all delegating to m_start(). */
static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->uid_map);
}

static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->gid_map);
}

static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->projid_map);
}
/*
 * Advance the iterator by bumping *pos and re-invoking whichever
 * ->start callback this seq_file was registered with; that callback
 * picks the right map (uid/gid/projid) for us.
 */
static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return seq->op->start(seq, pos);
}

/* Nothing to release: iteration hands out pointers into the map itself. */
static void m_stop(struct seq_file *seq, void *v)
{
	return;
}
/* seq_file operations for /proc/<pid>/uid_map */
const struct seq_operations proc_uid_seq_operations = {
	.start = uid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = uid_m_show,
};

/* seq_file operations for /proc/<pid>/gid_map */
const struct seq_operations proc_gid_seq_operations = {
	.start = gid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = gid_m_show,
};

/* seq_file operations for /proc/<pid>/projid_map */
const struct seq_operations proc_projid_seq_operations = {
	.start = projid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = projid_m_show,
};
  505. static bool mappings_overlap(struct uid_gid_map *new_map,
  506. struct uid_gid_extent *extent)
  507. {
  508. u32 upper_first, lower_first, upper_last, lower_last;
  509. unsigned idx;
  510. upper_first = extent->first;
  511. lower_first = extent->lower_first;
  512. upper_last = upper_first + extent->count - 1;
  513. lower_last = lower_first + extent->count - 1;
  514. for (idx = 0; idx < new_map->nr_extents; idx++) {
  515. u32 prev_upper_first, prev_lower_first;
  516. u32 prev_upper_last, prev_lower_last;
  517. struct uid_gid_extent *prev;
  518. prev = &new_map->extent[idx];
  519. prev_upper_first = prev->first;
  520. prev_lower_first = prev->lower_first;
  521. prev_upper_last = prev_upper_first + prev->count - 1;
  522. prev_lower_last = prev_lower_first + prev->count - 1;
  523. /* Does the upper range intersect a previous extent? */
  524. if ((prev_upper_first <= upper_last) &&
  525. (prev_upper_last >= upper_first))
  526. return true;
  527. /* Does the lower range intersect a previous extent? */
  528. if ((prev_lower_first <= lower_last) &&
  529. (prev_lower_last >= lower_first))
  530. return true;
  531. }
  532. return false;
  533. }
/*
 * map_write - common write handler for uid_map/gid_map/projid_map.
 * @file: the open map file
 * @buf, @count, @ppos: standard write(2) arguments
 * @cap_setid: capability guarding this map (CAP_SETUID/CAP_SETGID, or an
 *             invalid cap for projid, where no capability is required)
 * @map: the map being written (belongs to the child namespace)
 * @parent_map: the parent's map, used to translate lower ids into the
 *              kernel-global id space
 *
 * Parses up to UID_GID_MAP_MAX_EXTENTS lines of "first lower count",
 * validates and permission-checks them, translates the lower ids, and
 * publishes the result.  A map may only ever be written once.
 */
static ssize_t map_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos,
			 int cap_setid,
			 struct uid_gid_map *map,
			 struct uid_gid_map *parent_map)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct uid_gid_map new_map;
	unsigned idx;
	struct uid_gid_extent *extent = NULL;
	char *kbuf = NULL, *pos, *next_line;
	ssize_t ret = -EINVAL;

	/*
	 * The userns_state_mutex serializes all writes to any given map.
	 *
	 * Any map is only ever written once.
	 *
	 * An id map fits within 1 cache line on most architectures.
	 *
	 * On read nothing needs to be done unless you are on an
	 * architecture with a crazy cache coherency model like alpha.
	 *
	 * There is a one time data dependency between reading the
	 * count of the extents and the values of the extents.  The
	 * desired behavior is to see the values of the extents that
	 * were written before the count of the extents.
	 *
	 * To achieve this smp_wmb() is used to guarantee the write
	 * order and smp_rmb() guarantees that we don't have crazy
	 * architectures returning stale data.
	 */
	mutex_lock(&userns_state_mutex);

	ret = -EPERM;
	/* Only allow one successful write to the map */
	if (map->nr_extents != 0)
		goto out;

	/*
	 * Adjusting namespace settings requires capabilities on the target.
	 */
	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
		goto out;

	/* Only allow < page size writes at the beginning of the file */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= PAGE_SIZE))
		goto out;

	/* Slurp in the user data */
	kbuf = memdup_user_nul(buf, count);
	if (IS_ERR(kbuf)) {
		ret = PTR_ERR(kbuf);
		kbuf = NULL;
		goto out;
	}

	/* Parse the user data */
	ret = -EINVAL;
	pos = kbuf;
	new_map.nr_extents = 0;
	for (; pos; pos = next_line) {
		extent = &new_map.extent[new_map.nr_extents];

		/* Find the end of line and ensure I don't look past it */
		next_line = strchr(pos, '\n');
		if (next_line) {
			*next_line = '\0';
			next_line++;
			if (*next_line == '\0')
				next_line = NULL;
		}

		/* Each line is "<first> <lower_first> <count>". */
		pos = skip_spaces(pos);
		extent->first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->lower_first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->count = simple_strtoul(pos, &pos, 10);
		if (*pos && !isspace(*pos))
			goto out;

		/* Verify there is not trailing junk on the line */
		pos = skip_spaces(pos);
		if (*pos != '\0')
			goto out;

		/* Verify we have been given valid starting values */
		if ((extent->first == (u32) -1) ||
		    (extent->lower_first == (u32) -1))
			goto out;

		/* Verify count is not zero and does not cause the
		 * extent to wrap
		 */
		if ((extent->first + extent->count) <= extent->first)
			goto out;
		if ((extent->lower_first + extent->count) <=
		     extent->lower_first)
			goto out;

		/* Do the ranges in extent overlap any previous extents? */
		if (mappings_overlap(&new_map, extent))
			goto out;

		new_map.nr_extents++;

		/* Fail if the file contains too many extents */
		if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
		    (next_line != NULL))
			goto out;
	}
	/* Be very certain the new map actually exists */
	if (new_map.nr_extents == 0)
		goto out;

	ret = -EPERM;
	/* Validate the user is allowed to use user id's mapped to. */
	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
		goto out;

	/* Map the lower ids from the parent user namespace to the
	 * kernel global id space.
	 */
	for (idx = 0; idx < new_map.nr_extents; idx++) {
		u32 lower_first;
		extent = &new_map.extent[idx];

		lower_first = map_id_range_down(parent_map,
						extent->lower_first,
						extent->count);

		/* Fail if we can not map the specified extent to
		 * the kernel global id space.
		 */
		if (lower_first == (u32) -1)
			goto out;

		extent->lower_first = lower_first;
	}

	/* Install the map */
	memcpy(map->extent, new_map.extent,
	       new_map.nr_extents * sizeof(new_map.extent[0]));
	smp_wmb();	/* extents must be visible before the count */
	map->nr_extents = new_map.nr_extents;

	*ppos = count;
	ret = count;
out:
	mutex_unlock(&userns_state_mutex);
	kfree(kbuf);
	return ret;
}
  673. ssize_t proc_uid_map_write(struct file *file, const char __user *buf,
  674. size_t size, loff_t *ppos)
  675. {
  676. struct seq_file *seq = file->private_data;
  677. struct user_namespace *ns = seq->private;
  678. struct user_namespace *seq_ns = seq_user_ns(seq);
  679. if (!ns->parent)
  680. return -EPERM;
  681. if ((seq_ns != ns) && (seq_ns != ns->parent))
  682. return -EPERM;
  683. return map_write(file, buf, size, ppos, CAP_SETUID,
  684. &ns->uid_map, &ns->parent->uid_map);
  685. }
  686. ssize_t proc_gid_map_write(struct file *file, const char __user *buf,
  687. size_t size, loff_t *ppos)
  688. {
  689. struct seq_file *seq = file->private_data;
  690. struct user_namespace *ns = seq->private;
  691. struct user_namespace *seq_ns = seq_user_ns(seq);
  692. if (!ns->parent)
  693. return -EPERM;
  694. if ((seq_ns != ns) && (seq_ns != ns->parent))
  695. return -EPERM;
  696. return map_write(file, buf, size, ppos, CAP_SETGID,
  697. &ns->gid_map, &ns->parent->gid_map);
  698. }
  699. ssize_t proc_projid_map_write(struct file *file, const char __user *buf,
  700. size_t size, loff_t *ppos)
  701. {
  702. struct seq_file *seq = file->private_data;
  703. struct user_namespace *ns = seq->private;
  704. struct user_namespace *seq_ns = seq_user_ns(seq);
  705. if (!ns->parent)
  706. return -EPERM;
  707. if ((seq_ns != ns) && (seq_ns != ns->parent))
  708. return -EPERM;
  709. /* Anyone can set any valid project id no capability needed */
  710. return map_write(file, buf, size, ppos, -1,
  711. &ns->projid_map, &ns->parent->projid_map);
  712. }
/*
 * Decide whether the writer of @file may install @new_map on @ns.
 * A mapping is permitted when one of these holds:
 *  - it is a single 1-count extent mapping the namespace owner's own
 *    euid (uid map) or egid (gid map, only while setgroups is denied);
 *  - @cap_setid is not a valid capability (the projid case);
 *  - both the current task and the file opener hold @cap_setid over
 *    the parent namespace.
 */
static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *new_map)
{
	const struct cred *cred = file->f_cred;
	/* Don't allow mappings that would allow anything that wouldn't
	 * be allowed without the establishment of unprivileged mappings.
	 */
	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
	    uid_eq(ns->owner, cred->euid)) {
		u32 id = new_map->extent[0].lower_first;
		if (cap_setid == CAP_SETUID) {
			kuid_t uid = make_kuid(ns->parent, id);
			if (uid_eq(uid, cred->euid))
				return true;
		} else if (cap_setid == CAP_SETGID) {
			kgid_t gid = make_kgid(ns->parent, id);
			/* Mapping one's own egid is only safe once
			 * setgroups has been disabled in this namespace.
			 */
			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
			    gid_eq(gid, cred->egid))
				return true;
		}
	}

	/* Allow anyone to set a mapping that doesn't require privilege */
	if (!cap_valid(cap_setid))
		return true;

	/* Allow the specified ids if we have the appropriate capability
	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
	 * And the opener of the id file also had the appropriate capability.
	 */
	if (ns_capable(ns->parent, cap_setid) &&
	    file_ns_capable(file, ns->parent, cap_setid))
		return true;

	return false;
}
  747. int proc_setgroups_show(struct seq_file *seq, void *v)
  748. {
  749. struct user_namespace *ns = seq->private;
  750. unsigned long userns_flags = ACCESS_ONCE(ns->flags);
  751. seq_printf(seq, "%s\n",
  752. (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
  753. "allow" : "deny");
  754. return 0;
  755. }
/*
 * Write handler for /proc/<pid>/setgroups.  Accepts exactly "allow" or
 * "deny" (optionally followed by whitespace).  Transitions are one-way:
 * setgroups can not be re-enabled once disabled, and can not be
 * disabled once a gid map has been written.
 */
ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	char kbuf[8], *pos;
	bool setgroups_allowed;
	ssize_t ret;

	/* Only allow a very narrow range of strings to be written */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= sizeof(kbuf)))
		goto out;

	/* What was written? */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';	/* count < sizeof(kbuf), checked above */
	pos = kbuf;

	/* What is being requested? */
	ret = -EINVAL;
	if (strncmp(pos, "allow", 5) == 0) {
		pos += 5;
		setgroups_allowed = true;
	}
	else if (strncmp(pos, "deny", 4) == 0) {
		pos += 4;
		setgroups_allowed = false;
	}
	else
		goto out;

	/* Verify there is not trailing junk on the line */
	pos = skip_spaces(pos);
	if (*pos != '\0')
		goto out;

	ret = -EPERM;
	mutex_lock(&userns_state_mutex);
	if (setgroups_allowed) {
		/* Enabling setgroups after setgroups has been disabled
		 * is not allowed.
		 */
		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
			goto out_unlock;
	} else {
		/* Permanently disabling setgroups after setgroups has
		 * been enabled by writing the gid_map is not allowed.
		 */
		if (ns->gid_map.nr_extents != 0)
			goto out_unlock;
		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
	}
	mutex_unlock(&userns_state_mutex);

	/* Report a successful write */
	*ppos = count;
	ret = count;
out:
	return ret;
out_unlock:
	mutex_unlock(&userns_state_mutex);
	goto out;
}
  816. bool userns_may_setgroups(const struct user_namespace *ns)
  817. {
  818. bool allowed;
  819. mutex_lock(&userns_state_mutex);
  820. /* It is not safe to use setgroups until a gid mapping in
  821. * the user namespace has been established.
  822. */
  823. allowed = ns->gid_map.nr_extents != 0;
  824. /* Is setgroups allowed? */
  825. allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
  826. mutex_unlock(&userns_state_mutex);
  827. return allowed;
  828. }
  829. /*
  830. * Returns true if @ns is the same namespace as or a descendant of
  831. * @target_ns.
  832. */
  833. bool current_in_userns(const struct user_namespace *target_ns)
  834. {
  835. struct user_namespace *ns;
  836. for (ns = current_user_ns(); ns; ns = ns->parent) {
  837. if (ns == target_ns)
  838. return true;
  839. }
  840. return false;
  841. }
/* Recover the enclosing user_namespace from its embedded ns_common. */
static inline struct user_namespace *to_user_ns(struct ns_common *ns)
{
	return container_of(ns, struct user_namespace, ns);
}
/*
 * ->get callback for /proc/<pid>/ns/user: grab a reference to @task's
 * user namespace under RCU and return its ns_common handle.
 */
static struct ns_common *userns_get(struct task_struct *task)
{
	struct user_namespace *user_ns;

	rcu_read_lock();
	user_ns = get_user_ns(__task_cred(task)->user_ns);
	rcu_read_unlock();

	return user_ns ? &user_ns->ns : NULL;
}
/* ->put callback: drop the reference taken by userns_get(). */
static void userns_put(struct ns_common *ns)
{
	put_user_ns(to_user_ns(ns));
}
/*
 * ->install callback for setns(CLONE_NEWUSER): move the calling task's
 * credentials into @ns.  Refused for the caller's own namespace, for
 * multi-threaded callers, for tasks sharing their fs_struct, and
 * without CAP_SYS_ADMIN over the target namespace.  On success the
 * cred's old user_ns reference is dropped and commit_creds() consumes
 * the new cred.
 */
static int userns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct user_namespace *user_ns = to_user_ns(ns);
	struct cred *cred;

	/* Don't allow gaining capabilities by reentering
	 * the same user namespace.
	 */
	if (user_ns == current_user_ns())
		return -EINVAL;

	/* Tasks that share a thread group must share a user namespace */
	if (!thread_group_empty(current))
		return -EINVAL;

	/* The fs_struct must not be shared with other tasks. */
	if (current->fs->users != 1)
		return -EINVAL;

	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	put_user_ns(cred->user_ns);
	set_cred_user_ns(cred, get_user_ns(user_ns));

	return commit_creds(cred);
}
/* Operations backing the /proc/<pid>/ns/user namespace file. */
const struct proc_ns_operations userns_operations = {
	.name = "user",
	.type = CLONE_NEWUSER,
	.get = userns_get,
	.put = userns_put,
	.install = userns_install,
};
/* Boot-time setup: create the slab cache for user_namespace objects. */
static __init int user_namespaces_init(void)
{
	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
	return 0;
}
subsys_initcall(user_namespaces_init);