/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"

/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
        struct list_head r_list;
        struct task_struct *r_tsk;

        int r_mode;
        long r_msgtype;
        long r_maxsize;

        struct msg_msg *volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
        struct list_head list;
        struct task_struct *tsk;
};

#define SEARCH_ANY              1
#define SEARCH_EQUAL            2
#define SEARCH_NOTEQUAL         3
#define SEARCH_LESSEQUAL        4
#define SEARCH_NUMBER           5

#define msg_ids(ns)     ((ns)->ids[IPC_MSG_IDS])

#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)

static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif

/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done while staying within the (MSGMNI, IPCMNI/nr_ipc_ns)
 * range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
        struct sysinfo i;
        unsigned long allowed;
        int nb_ns;

        si_meminfo(&i);
        allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
                / MSGMNB;
        nb_ns = atomic_read(&nr_ipc_ns);
        allowed /= nb_ns;

        if (allowed < MSGMNI) {
                ns->msg_ctlmni = MSGMNI;
                return;
        }

        if (allowed > IPCMNI / nb_ns) {
                ns->msg_ctlmni = IPCMNI / nb_ns;
                return;
        }

        ns->msg_ctlmni = allowed;
}
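
/*
 * Worked example of the scaling above (a sketch assuming the common
 * defaults MSG_MEM_SCALE == 32, MSGMNB == 16384, MSGMNI == 32 and
 * IPCMNI == 32768; check the headers of your tree for the real values):
 * with 1 GiB of lowmem in 4 KiB pages, i.totalram - i.totalhigh is
 * 262144 and i.mem_unit is 4096, so
 *
 *      allowed = (262144 / 32) * 4096 / 16384 = 2048
 *
 * With a single ipc namespace this lies inside [32, 32768], so
 * msg_ctlmni becomes 2048 message queue ids.
 */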

void msg_init_ns(struct ipc_namespace *ns)
{
        ns->msg_ctlmax = MSGMAX;
        ns->msg_ctlmnb = MSGMNB;

        recompute_msgmni(ns);

        atomic_set(&ns->msg_bytes, 0);
        atomic_set(&ns->msg_hdrs, 0);
        ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}

#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &msg_ids(ns), freeque);
        idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif

void __init msg_init(void)
{
        msg_init_ns(&init_ipc_ns);

        printk(KERN_INFO "msgmni has been set to %d\n",
                init_ipc_ns.msg_ctlmni);

        ipc_init_proc_interface("sysvipc/msg",
                                "       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
                                IPC_MSG_IDS, sysvipc_msg_proc_show);
}

/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct msg_queue *)ipcp;

        return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct msg_queue *)ipcp;

        return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
        ipc_rmid(&msg_ids(ns), &s->q_perm);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
        struct msg_queue *msq;
        int id, retval;
        key_t key = params->key;
        int msgflg = params->flg;

        msq = ipc_rcu_alloc(sizeof(*msq));
        if (!msq)
                return -ENOMEM;

        msq->q_perm.mode = msgflg & S_IRWXUGO;
        msq->q_perm.key = key;

        msq->q_perm.security = NULL;
        retval = security_msg_queue_alloc(msq);
        if (retval) {
                ipc_rcu_putref(msq);
                return retval;
        }

        /*
         * ipc_addid() locks msq
         */
        id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
        if (id < 0) {
                security_msg_queue_free(msq);
                ipc_rcu_putref(msq);
                return id;
        }

        msq->q_stime = msq->q_rtime = 0;
        msq->q_ctime = get_seconds();
        msq->q_cbytes = msq->q_qnum = 0;
        msq->q_qbytes = ns->msg_ctlmnb;
        msq->q_lspid = msq->q_lrpid = 0;
        INIT_LIST_HEAD(&msq->q_messages);
        INIT_LIST_HEAD(&msq->q_receivers);
        INIT_LIST_HEAD(&msq->q_senders);

        msg_unlock(msq);

        return msq->q_perm.id;
}

static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
        mss->tsk = current;
        current->state = TASK_INTERRUPTIBLE;
        list_add_tail(&mss->list, &msq->q_senders);
}

/*
 * ss_wakeup(kill=1) marks a woken sender by clearing list.next instead of
 * unlinking it, so ss_del() must tolerate an entry that is no longer on
 * the list.
 */
static inline void ss_del(struct msg_sender *mss)
{
        if (mss->list.next != NULL)
                list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
        struct list_head *tmp;

        tmp = h->next;
        while (tmp != h) {
                struct msg_sender *mss;

                mss = list_entry(tmp, struct msg_sender, list);
                tmp = tmp->next;
                if (kill)
                        mss->list.next = NULL;
                wake_up_process(mss->tsk);
        }
}

static void expunge_all(struct msg_queue *msq, int res)
{
        struct list_head *tmp;

        tmp = msq->q_receivers.next;
        while (tmp != &msq->q_receivers) {
                struct msg_receiver *msr;

                msr = list_entry(tmp, struct msg_receiver, r_list);
                tmp = tmp->next;
                /*
                 * Clear r_msg, wake the receiver, then publish the final
                 * error code.  The lockless receiver in do_msgrcv() spins
                 * while r_msg is NULL, so the smp_mb() ensures it cannot
                 * observe the error pointer before the wakeup is complete.
                 */
                msr->r_msg = NULL;
                wake_up_process(msr->r_tsk);
                smp_mb();
                msr->r_msg = ERR_PTR(res);
        }
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct list_head *tmp;
        struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

        expunge_all(msq, -EIDRM);
        ss_wakeup(&msq->q_senders, 1);
        msg_rmid(ns, msq);
        msg_unlock(msq);

        tmp = msq->q_messages.next;
        while (tmp != &msq->q_messages) {
                struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

                tmp = tmp->next;
                atomic_dec(&ns->msg_hdrs);
                free_msg(msg);
        }
        atomic_sub(msq->q_cbytes, &ns->msg_bytes);
        security_msg_queue_free(msq);
        ipc_rcu_putref(msq);
}

/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
        struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

        return security_msg_queue_associate(msq, msgflg);
}

SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops msg_ops;
        struct ipc_params msg_params;

        ns = current->nsproxy->ipc_ns;

        msg_ops.getnew = newque;
        msg_ops.associate = msg_security;
        msg_ops.more_checks = NULL;

        msg_params.key = key;
        msg_params.flg = msgflg;

        return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
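
/*
 * Hypothetical user-space sketch (not part of this file) of the call this
 * syscall serves:
 *
 *      int id = msgget(ftok("/some/path", 'q'), IPC_CREAT | 0600);
 *      if (id < 0)
 *              perror("msgget");
 *
 * ipcget() routes a new key (or IPC_PRIVATE) to newque() and an existing
 * key to msg_security() for the associate permission check.
 */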

static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct msqid_ds out;

                memset(&out, 0, sizeof(out));

                ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

                out.msg_stime = in->msg_stime;
                out.msg_rtime = in->msg_rtime;
                out.msg_ctime = in->msg_ctime;

                if (in->msg_cbytes > USHRT_MAX)
                        out.msg_cbytes = USHRT_MAX;
                else
                        out.msg_cbytes = in->msg_cbytes;
                out.msg_lcbytes = in->msg_cbytes;

                if (in->msg_qnum > USHRT_MAX)
                        out.msg_qnum = USHRT_MAX;
                else
                        out.msg_qnum = in->msg_qnum;

                if (in->msg_qbytes > USHRT_MAX)
                        out.msg_qbytes = USHRT_MAX;
                else
                        out.msg_qbytes = in->msg_qbytes;
                out.msg_lqbytes = in->msg_qbytes;

                out.msg_lspid = in->msg_lspid;
                out.msg_lrpid = in->msg_lrpid;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
        {
                struct msqid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->msg_perm.uid = tbuf_old.msg_perm.uid;
                out->msg_perm.gid = tbuf_old.msg_perm.gid;
                out->msg_perm.mode = tbuf_old.msg_perm.mode;

                if (tbuf_old.msg_qbytes == 0)
                        out->msg_qbytes = tbuf_old.msg_lqbytes;
                else
                        out->msg_qbytes = tbuf_old.msg_qbytes;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

/*
 * This function handles some msgctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
                       struct msqid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct msqid64_ds uninitialized_var(msqid64);
        struct msg_queue *msq;
        int err;

        if (cmd == IPC_SET) {
                if (copy_msqid_from_user(&msqid64, buf, version))
                        return -EFAULT;
        }

        ipcp = ipcctl_pre_down(ns, &msg_ids(ns), msqid, cmd,
                               &msqid64.msg_perm, msqid64.msg_qbytes);
        if (IS_ERR(ipcp))
                return PTR_ERR(ipcp);

        msq = container_of(ipcp, struct msg_queue, q_perm);

        err = security_msg_queue_msgctl(msq, cmd);
        if (err)
                goto out_unlock;

        switch (cmd) {
        case IPC_RMID:
                freeque(ns, ipcp);
                goto out_up;
        case IPC_SET:
                if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
                    !capable(CAP_SYS_RESOURCE)) {
                        err = -EPERM;
                        goto out_unlock;
                }

                err = ipc_update_perm(&msqid64.msg_perm, ipcp);
                if (err)
                        goto out_unlock;

                msq->q_qbytes = msqid64.msg_qbytes;

                msq->q_ctime = get_seconds();
                /* sleeping receivers might be excluded by
                 * stricter permissions.
                 */
                expunge_all(msq, -EAGAIN);
                /* sleeping senders might be able to send
                 * due to a larger queue size.
                 */
                ss_wakeup(&msq->q_senders, 0);
                break;
        default:
                err = -EINVAL;
        }

out_unlock:
        msg_unlock(msq);
out_up:
        up_write(&msg_ids(ns).rw_mutex);
        return err;
}

SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
        struct msg_queue *msq;
        int err, version;
        struct ipc_namespace *ns;

        if (msqid < 0 || cmd < 0)
                return -EINVAL;

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO:
        case MSG_INFO:
        {
                struct msginfo msginfo;
                int max_id;

                if (!buf)
                        return -EFAULT;
                /*
                 * We must not return kernel stack data: due to padding,
                 * setting all member fields is not enough, so clear the
                 * whole structure first.
                 */
                err = security_msg_queue_msgctl(NULL, cmd);
                if (err)
                        return err;

                memset(&msginfo, 0, sizeof(msginfo));
                msginfo.msgmni = ns->msg_ctlmni;
                msginfo.msgmax = ns->msg_ctlmax;
                msginfo.msgmnb = ns->msg_ctlmnb;
                msginfo.msgssz = MSGSSZ;
                msginfo.msgseg = MSGSEG;
                down_read(&msg_ids(ns).rw_mutex);
                if (cmd == MSG_INFO) {
                        msginfo.msgpool = msg_ids(ns).in_use;
                        msginfo.msgmap = atomic_read(&ns->msg_hdrs);
                        msginfo.msgtql = atomic_read(&ns->msg_bytes);
                } else {
                        msginfo.msgmap = MSGMAP;
                        msginfo.msgpool = MSGPOOL;
                        msginfo.msgtql = MSGTQL;
                }
                max_id = ipc_get_maxid(&msg_ids(ns));
                up_read(&msg_ids(ns).rw_mutex);
                if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
                        return -EFAULT;
                return (max_id < 0) ? 0 : max_id;
        }
        case MSG_STAT:  /* msqid is an index rather than a msg queue id */
        case IPC_STAT:
        {
                struct msqid64_ds tbuf;
                int success_return;

                if (!buf)
                        return -EFAULT;

                if (cmd == MSG_STAT) {
                        msq = msg_lock(ns, msqid);
                        if (IS_ERR(msq))
                                return PTR_ERR(msq);
                        success_return = msq->q_perm.id;
                } else {
                        msq = msg_lock_check(ns, msqid);
                        if (IS_ERR(msq))
                                return PTR_ERR(msq);
                        success_return = 0;
                }
                err = -EACCES;
                if (ipcperms(ns, &msq->q_perm, S_IRUGO))
                        goto out_unlock;

                err = security_msg_queue_msgctl(msq, cmd);
                if (err)
                        goto out_unlock;

                memset(&tbuf, 0, sizeof(tbuf));

                kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
                tbuf.msg_stime = msq->q_stime;
                tbuf.msg_rtime = msq->q_rtime;
                tbuf.msg_ctime = msq->q_ctime;
                tbuf.msg_cbytes = msq->q_cbytes;
                tbuf.msg_qnum = msq->q_qnum;
                tbuf.msg_qbytes = msq->q_qbytes;
                tbuf.msg_lspid = msq->q_lspid;
                tbuf.msg_lrpid = msq->q_lrpid;
                msg_unlock(msq);
                if (copy_msqid_to_user(buf, &tbuf, version))
                        return -EFAULT;
                return success_return;
        }
        case IPC_SET:
        case IPC_RMID:
                err = msgctl_down(ns, msqid, cmd, buf, version);
                return err;
        default:
                return -EINVAL;
        }

out_unlock:
        msg_unlock(msq);
        return err;
}

static int testmsg(struct msg_msg *msg, long type, int mode)
{
        switch (mode) {
        case SEARCH_ANY:
        case SEARCH_NUMBER:
                return 1;
        case SEARCH_LESSEQUAL:
                if (msg->m_type <= type)
                        return 1;
                break;
        case SEARCH_EQUAL:
                if (msg->m_type == type)
                        return 1;
                break;
        case SEARCH_NOTEQUAL:
                if (msg->m_type != type)
                        return 1;
                break;
        }
        return 0;
}
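
/*
 * Summary of the search modes tested above:
 *
 *      SEARCH_ANY        any message matches (msgtyp == 0)
 *      SEARCH_EQUAL      m_type must equal msgtyp
 *      SEARCH_NOTEQUAL   m_type must differ from msgtyp (MSG_EXCEPT)
 *      SEARCH_LESSEQUAL  m_type must be <= |msgtyp| (msgtyp < 0)
 *      SEARCH_NUMBER     match by position in the queue (MSG_COPY);
 *                        the position check itself lives in find_msg()
 */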

/*
 * Hand a new message directly to a sleeping receiver, bypassing the queue.
 * The result is published to the receiver with the same
 * NULL / wake_up_process() / smp_mb() / final-value handshake as in
 * expunge_all().
 */
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
        struct list_head *tmp;

        tmp = msq->q_receivers.next;
        while (tmp != &msq->q_receivers) {
                struct msg_receiver *msr;

                msr = list_entry(tmp, struct msg_receiver, r_list);
                tmp = tmp->next;
                if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
                    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
                                               msr->r_msgtype, msr->r_mode)) {

                        list_del(&msr->r_list);
                        if (msr->r_maxsize < msg->m_ts) {
                                msr->r_msg = NULL;
                                wake_up_process(msr->r_tsk);
                                smp_mb();
                                msr->r_msg = ERR_PTR(-E2BIG);
                        } else {
                                msr->r_msg = NULL;
                                msq->q_lrpid = task_pid_vnr(msr->r_tsk);
                                msq->q_rtime = get_seconds();
                                wake_up_process(msr->r_tsk);
                                smp_mb();
                                msr->r_msg = msg;

                                return 1;
                        }
                }
        }
        return 0;
}

long do_msgsnd(int msqid, long mtype, void __user *mtext,
                size_t msgsz, int msgflg)
{
        struct msg_queue *msq;
        struct msg_msg *msg;
        int err;
        struct ipc_namespace *ns;

        ns = current->nsproxy->ipc_ns;

        if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
                return -EINVAL;
        if (mtype < 1)
                return -EINVAL;

        msg = load_msg(mtext, msgsz);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->m_type = mtype;
        msg->m_ts = msgsz;

        msq = msg_lock_check(ns, msqid);
        if (IS_ERR(msq)) {
                err = PTR_ERR(msq);
                goto out_free;
        }

        for (;;) {
                struct msg_sender s;

                err = -EACCES;
                if (ipcperms(ns, &msq->q_perm, S_IWUGO))
                        goto out_unlock_free;

                err = security_msg_queue_msgsnd(msq, msg, msgflg);
                if (err)
                        goto out_unlock_free;

                if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
                                1 + msq->q_qnum <= msq->q_qbytes) {
                        break;
                }

                /* queue full, wait: */
                if (msgflg & IPC_NOWAIT) {
                        err = -EAGAIN;
                        goto out_unlock_free;
                }
                ss_add(msq, &s);

                /* pin the queue so its memory cannot be freed while we sleep */
                if (!ipc_rcu_getref(msq)) {
                        err = -EIDRM;
                        goto out_unlock_free;
                }

                msg_unlock(msq);
                schedule();

                ipc_lock_by_ptr(&msq->q_perm);
                ipc_rcu_putref(msq);
                if (msq->q_perm.deleted) {
                        err = -EIDRM;
                        goto out_unlock_free;
                }
                ss_del(&s);

                if (signal_pending(current)) {
                        err = -ERESTARTNOHAND;
                        goto out_unlock_free;
                }
        }

        msq->q_lspid = task_tgid_vnr(current);
        msq->q_stime = get_seconds();

        if (!pipelined_send(msq, msg)) {
                /* no one is waiting for this message, enqueue it */
                list_add_tail(&msg->m_list, &msq->q_messages);
                msq->q_cbytes += msgsz;
                msq->q_qnum++;
                atomic_add(msgsz, &ns->msg_bytes);
                atomic_inc(&ns->msg_hdrs);
        }

        err = 0;
        msg = NULL;

out_unlock_free:
        msg_unlock(msq);
out_free:
        if (msg != NULL)
                free_msg(msg);
        return err;
}

SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
                int, msgflg)
{
        long mtype;

        if (get_user(mtype, &msgp->mtype))
                return -EFAULT;
        return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
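
/*
 * Hypothetical user-space sketch (not part of this file) of the calling
 * convention this syscall implements: mtype must be positive, and msgsz
 * counts only the mtext payload, not the leading mtype field.
 *
 *      struct msgbuf { long mtype; char mtext[64]; } m = { .mtype = 1 };
 *
 *      strcpy(m.mtext, "hello");
 *      if (msgsnd(id, &m, sizeof(m.mtext), IPC_NOWAIT) < 0)
 *              perror("msgsnd");       (EAGAIN means the queue was full)
 */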

static inline int convert_mode(long *msgtyp, int msgflg)
{
        if (msgflg & MSG_COPY)
                return SEARCH_NUMBER;
        /*
         * find message of correct type.
         * msgtyp = 0 => get first.
         * msgtyp > 0 => get first message of matching type.
         * msgtyp < 0 => get message with least type which is <= abs(msgtyp).
         */
        if (*msgtyp == 0)
                return SEARCH_ANY;
        if (*msgtyp < 0) {
                *msgtyp = -*msgtyp;
                return SEARCH_LESSEQUAL;
        }
        if (msgflg & MSG_EXCEPT)
                return SEARCH_NOTEQUAL;
        return SEARCH_EQUAL;
}
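
/*
 * Example of the negative-msgtyp case (an illustration, not code from
 * this file): with messages of type 7 and type 3 queued, msgrcv() with
 * msgtyp == -5 becomes SEARCH_LESSEQUAL with msgtyp == 5; find_msg()
 * then tightens the bound to m_type - 1 on every match, so the type-3
 * message (the lowest type <= 5) is the one returned.
 */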

static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
        struct msgbuf __user *msgp = dest;
        size_t msgsz;

        if (put_user(msg->m_type, &msgp->mtype))
                return -EFAULT;

        msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
        if (store_msg(msgp->mtext, msg, msgsz))
                return -EFAULT;
        return msgsz;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * This function creates a new kernel message structure, large enough to
 * store bufsz message bytes.
 */
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
        struct msg_msg *copy;

        /*
         * Create dummy message to copy real message to.
         */
        copy = load_msg(buf, bufsz);
        if (!IS_ERR(copy))
                copy->m_ts = bufsz;
        return copy;
}

static inline void free_copy(struct msg_msg *copy)
{
        if (copy)
                free_msg(copy);
}
#else
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
        return ERR_PTR(-ENOSYS);
}

static inline void free_copy(struct msg_msg *copy)
{
}
#endif

static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
        struct msg_msg *msg, *found = NULL;
        long count = 0;

        list_for_each_entry(msg, &msq->q_messages, m_list) {
                if (testmsg(msg, *msgtyp, mode) &&
                    !security_msg_queue_msgrcv(msq, msg, current,
                                               *msgtyp, mode)) {
                        if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
                                /*
                                 * Remember the best candidate so far and
                                 * keep scanning for an even lower type.
                                 */
                                *msgtyp = msg->m_type - 1;
                                found = msg;
                        } else if (mode == SEARCH_NUMBER) {
                                if (*msgtyp == count)
                                        return msg;
                        } else
                                return msg;
                        count++;
                }
        }

        return found ?: ERR_PTR(-EAGAIN);
}

long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
               int msgflg,
               long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
        struct msg_queue *msq;
        struct msg_msg *msg;
        int mode;
        struct ipc_namespace *ns;
        struct msg_msg *copy = NULL;

        ns = current->nsproxy->ipc_ns;

        if (msqid < 0 || (long) bufsz < 0)
                return -EINVAL;
        if (msgflg & MSG_COPY) {
                copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
                if (IS_ERR(copy))
                        return PTR_ERR(copy);
        }
        mode = convert_mode(&msgtyp, msgflg);

        msq = msg_lock_check(ns, msqid);
        if (IS_ERR(msq)) {
                free_copy(copy);
                return PTR_ERR(msq);
        }

        for (;;) {
                struct msg_receiver msr_d;

                msg = ERR_PTR(-EACCES);
                if (ipcperms(ns, &msq->q_perm, S_IRUGO))
                        goto out_unlock;

                msg = find_msg(msq, &msgtyp, mode);

                if (!IS_ERR(msg)) {
                        /*
                         * Found a suitable message.
                         * Unlink it from the queue.
                         */
                        if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
                                msg = ERR_PTR(-E2BIG);
                                goto out_unlock;
                        }
                        /*
                         * If we are copying, then do not unlink message and do
                         * not update queue parameters.
                         */
                        if (msgflg & MSG_COPY) {
                                msg = copy_msg(msg, copy);
                                goto out_unlock;
                        }
                        list_del(&msg->m_list);
                        msq->q_qnum--;
                        msq->q_rtime = get_seconds();
                        msq->q_lrpid = task_tgid_vnr(current);
                        msq->q_cbytes -= msg->m_ts;
                        atomic_sub(msg->m_ts, &ns->msg_bytes);
                        atomic_dec(&ns->msg_hdrs);
                        ss_wakeup(&msq->q_senders, 0);
                        msg_unlock(msq);
                        break;
                }
                /* No message waiting. Wait for a message */
                if (msgflg & IPC_NOWAIT) {
                        msg = ERR_PTR(-ENOMSG);
                        goto out_unlock;
                }
                list_add_tail(&msr_d.r_list, &msq->q_receivers);
                msr_d.r_tsk = current;
                msr_d.r_msgtype = msgtyp;
                msr_d.r_mode = mode;
                if (msgflg & MSG_NOERROR)
                        msr_d.r_maxsize = INT_MAX;
                else
                        msr_d.r_maxsize = bufsz;
                msr_d.r_msg = ERR_PTR(-EAGAIN);
                current->state = TASK_INTERRUPTIBLE;
                msg_unlock(msq);

                schedule();

                /* Lockless receive, part 1:
                 * Disable preemption. We don't hold a reference to the queue
                 * and getting a reference would defeat the idea of a lockless
                 * operation, thus the code relies on rcu to guarantee the
                 * existence of msq:
                 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
                 * Thus if r_msg is -EAGAIN, then the queue is not yet
                 * destroyed.
                 * rcu_read_lock() prevents preemption between reading r_msg
                 * and the spin_lock() inside ipc_lock_by_ptr().
                 */
                rcu_read_lock();

                /* Lockless receive, part 2:
                 * Wait until pipelined_send or expunge_all are outside of
                 * wake_up_process(). There is a race with exit(), see
                 * ipc/mqueue.c for the details.
                 */
                msg = (struct msg_msg *)msr_d.r_msg;
                while (msg == NULL) {
                        cpu_relax();
                        msg = (struct msg_msg *)msr_d.r_msg;
                }

                /* Lockless receive, part 3:
                 * If there is a message or an error then accept it without
                 * locking.
                 */
                if (msg != ERR_PTR(-EAGAIN)) {
                        rcu_read_unlock();
                        break;
                }

                /* Lockless receive, part 4:
                 * Acquire the queue spinlock.
                 */
                ipc_lock_by_ptr(&msq->q_perm);
                rcu_read_unlock();

                /* Lockless receive, part 5:
                 * Repeat the test after acquiring the spinlock.
                 */
                msg = (struct msg_msg *)msr_d.r_msg;
                if (msg != ERR_PTR(-EAGAIN))
                        goto out_unlock;

                list_del(&msr_d.r_list);
                if (signal_pending(current)) {
                        msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
                        msg_unlock(msq);
                        break;
                }
        }
        if (IS_ERR(msg)) {
                free_copy(copy);
                return PTR_ERR(msg);
        }

        bufsz = msg_handler(buf, msg, bufsz);
        free_msg(msg);

        return bufsz;
}

SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
                long, msgtyp, int, msgflg)
{
        return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
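
/*
 * Hypothetical user-space sketch (not part of this file) of a receive:
 *
 *      struct msgbuf { long mtype; char mtext[64]; } m;
 *      ssize_t n = msgrcv(id, &m, sizeof(m.mtext), -5, MSG_NOERROR);
 *
 * msgtyp == -5 selects the queued message with the lowest type <= 5
 * (SEARCH_LESSEQUAL); MSG_NOERROR silently truncates a message larger
 * than 64 bytes instead of failing with -E2BIG; without IPC_NOWAIT the
 * caller sleeps on q_receivers until a matching message arrives.
 */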

#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
        struct user_namespace *user_ns = seq_user_ns(s);
        struct msg_queue *msq = it;

        return seq_printf(s,
                        "%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
                        msq->q_perm.key,
                        msq->q_perm.id,
                        msq->q_perm.mode,
                        msq->q_cbytes,
                        msq->q_qnum,
                        msq->q_lspid,
                        msq->q_lrpid,
                        from_kuid_munged(user_ns, msq->q_perm.uid),
                        from_kgid_munged(user_ns, msq->q_perm.gid),
                        from_kuid_munged(user_ns, msq->q_perm.cuid),
                        from_kgid_munged(user_ns, msq->q_perm.cgid),
                        msq->q_stime,
                        msq->q_rtime,
                        msq->q_ctime);
}
#endif