/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
#include <linux/uaccess.h>

#include "util.h"
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))
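/*
 * shm_file_data() casts &file->private_data so that it evaluates to an
 * lvalue of type struct shm_file_data *: the same macro both reads the
 * per-attach data and stores it (see shm_release() below, which assigns
 * NULL through it).
 */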
static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif
void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);
void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}
static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer (as returned by ipc_lock()), and error out as
	 * appropriate.
	 */
	if (IS_ERR(ipcp))
		return (void *)ipcp;
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
	struct shmid_kernel *shp = ipc_rcu_to_struct(p);

	security_shm_free(shp);
	ipc_rcu_free(head);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}
static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}
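/*
 * __shm_open() can fail only when the id lookup loses a race with
 * shm_destroy(). shm_mmap() propagates that error to its caller, while
 * the fork path (shm_open() below) has no way to report it and can
 * only warn.
 */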
/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}
/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_rcu_putref(shp, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}
/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and whose originating
	 * process has already exited.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments that were not yet mapped,
	 * and mark any mapped as orphan to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}
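/*
 * Summary of exit_shm(): with kernel.shm_rmid_forced == 0, the exiting
 * task's segments are merely marked orphaned (shm_creator = NULL) so a
 * later sysctl flip can reap them via shm_destroy_orphaned(); with the
 * sysctl set, unattached segments are destroyed immediately and attached
 * ones are left marked for shm_close() to finish off on last detach.
 */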
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent a
	 * removed IPC ID: propagate the shm_lock() error to the caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}
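/*
 * The wrapping above is the heart of the attach path: the underlying
 * shmem/hugetlbfs ->mmap() installs its own vm_ops, which shm_mmap()
 * saves in sfd->vm_ops and then replaces with shm_vm_ops. Faults and
 * NUMA policy calls are delegated to the saved ops, while open/close
 * are intercepted to keep shm_nattch and the atime/dtime/lprid fields
 * up to date.
 */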
static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp, ipc_rcu_free);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow SHM_NORESERVE to disable accounting under
		 * OVERCOMMIT_NEVER, even if it is asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}
	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	ipc_rcu_putref(shp, shm_rcu_free);
	return error;
}
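/*
 * Error unwinding in newseg() mirrors the setup order: the no_id path
 * drops the hugetlb locked-memory charge (taken by hugetlb_file_setup()
 * via mlock_user) and the file reference, then falls through to no_file,
 * which releases the shmid_kernel itself through shm_rcu_free() so the
 * security blob allocated above is freed as well.
 */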
/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = shm_security,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}
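/*
 * Illustrative userspace lifecycle of the interfaces implemented in this
 * file (a sketch, not a normative usage contract):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	void *p = shmat(id, NULL, 0);      - attach, served by do_shmat()
 *	shmctl(id, IPC_RMID, NULL);        - sets SHM_DEST while attached
 *	shmdt(p);                          - last detach; shm_close() destroys
 */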
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	{
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	}
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}
/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}
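/*
 * Locking convention in shmctl_down(): ipcctl_pre_down_nolock() does the
 * ownership/capability and audit checks and returns with only the RCU
 * read lock held; the per-object spinlock is taken here, just around the
 * actual IPC_RMID/IPC_SET update. That is why the unlock labels are split
 * into out_unlock0 (object lock + rcu) and out_unlock1 (rcu only).
 */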
static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);

		/* check if shm_destroy() is tearing down shp */
		if (!ipc_valid_object(&shp->shm_perm)) {
			err = -EIDRM;
			goto out_unlock0;
		}

		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();

			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid)) {
				err = -EPERM;
				goto out_unlock0;
			}
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
				err = -EPERM;
				goto out_unlock0;
			}
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();

			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}
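/*
 * Note the SHM_UNLOCK tail above: walking the mapping to pull its pages
 * off the unevictable list is potentially long-running, so the code
 * pins shm_file with get_file(), drops the object spinlock and the RCU
 * read lock first, and only then calls shmem_unlock_mapping().
 */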
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}

		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(d_inode(path.dentry));
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	if (down_write_killable(&current->mm->mmap_sem)) {
		err = -EINTR;
		goto out_fput;
	}

	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}
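/*
 * Attach accounting in do_shmat() is deliberately pessimistic: shm_nattch
 * is bumped under the object lock before the mmap is attempted, which
 * keeps shm_destroy() away while all locks are dropped, and the out_nattch
 * path drops it again unconditionally. On success, shm_mmap()'s own
 * __shm_open() has taken the real reference for the vma, so the decrement
 * merely cancels the temporary hold; on failure it may take nattch to
 * zero and trigger the deferred shm_destroy().
 */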
SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
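/*
 * SHM_RND rounding example: with shmlba = 0x4000 (for example, SHMLBA is
 * 4 * PAGE_SIZE on ARM to handle cache colouring; on most architectures
 * it is simply PAGE_SIZE), a request for addr 0x40006123 is masked by
 * ~(shmlba - 1) down to 0x40004000. Without SHM_RND the same address
 * would fail the alignment check in do_shmat() and return -EINVAL.
 */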
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or
		 * otherwise it starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped. With mremap(), someone could place
			 * pages from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else	/* CONFIG_MMU */
	/*
	 * Under NOMMU conditions, the exact address to be destroyed must be
	 * given.
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d %4o " SIZE_SPEC " %5u %5u "
		   "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   shp->shm_cprid,
		   shp->shm_lprid,
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif /* CONFIG_PROC_FS */