/*
 * linux/ipc/msg.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/capability.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <linux/uaccess.h>
#include "util.h"

/* one msg_receiver structure for each sleeping receiver */
struct msg_receiver {
        struct list_head        r_list;
        struct task_struct      *r_tsk;

        int                     r_mode;
        long                    r_msgtype;
        long                    r_maxsize;

        /*
         * Mark r_msg volatile so that the compiler
         * does not try to get smart and optimize
         * it. We rely on this for the lockless
         * receive algorithm.
         */
        struct msg_msg          *volatile r_msg;
};

/* one msg_sender for each sleeping sender */
struct msg_sender {
        struct list_head        list;
        struct task_struct      *tsk;
};

#define SEARCH_ANY              1
#define SEARCH_EQUAL            2
#define SEARCH_NOTEQUAL         3
#define SEARCH_LESSEQUAL        4
#define SEARCH_NUMBER           5

#define msg_ids(ns)     ((ns)->ids[IPC_MSG_IDS])

static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct msg_queue, q_perm);
}

static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns,
                                                        int id)
{
        struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id);

        if (IS_ERR(ipcp))
                return ERR_CAST(ipcp);

        return container_of(ipcp, struct msg_queue, q_perm);
}

static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
        ipc_rmid(&msg_ids(ns), &s->q_perm);
}

static void msg_rcu_free(struct rcu_head *head)
{
        struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
        struct msg_queue *msq = ipc_rcu_to_struct(p);

        security_msg_queue_free(msq);
        ipc_rcu_free(head);
}

/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rwsem held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
        struct msg_queue *msq;
        int id, retval;
        key_t key = params->key;
        int msgflg = params->flg;

        msq = ipc_rcu_alloc(sizeof(*msq));
        if (!msq)
                return -ENOMEM;

        msq->q_perm.mode = msgflg & S_IRWXUGO;
        msq->q_perm.key = key;

        msq->q_perm.security = NULL;
        retval = security_msg_queue_alloc(msq);
        if (retval) {
                ipc_rcu_putref(msq, ipc_rcu_free);
                return retval;
        }

        /* ipc_addid() locks msq upon success. */
        id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
        if (id < 0) {
                ipc_rcu_putref(msq, msg_rcu_free);
                return id;
        }

        msq->q_stime = msq->q_rtime = 0;
        msq->q_ctime = get_seconds();
        msq->q_cbytes = msq->q_qnum = 0;
        msq->q_qbytes = ns->msg_ctlmnb;
        msq->q_lspid = msq->q_lrpid = 0;
        INIT_LIST_HEAD(&msq->q_messages);
        INIT_LIST_HEAD(&msq->q_receivers);
        INIT_LIST_HEAD(&msq->q_senders);

        ipc_unlock_object(&msq->q_perm);
        rcu_read_unlock();

        return msq->q_perm.id;
}

static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
        mss->tsk = current;
        __set_current_state(TASK_INTERRUPTIBLE);
        list_add_tail(&mss->list, &msq->q_senders);
}

static inline void ss_del(struct msg_sender *mss)
{
        if (mss->list.next != NULL)
                list_del(&mss->list);
}

static void ss_wakeup(struct list_head *h, int kill)
{
        struct msg_sender *mss, *t;

        list_for_each_entry_safe(mss, t, h, list) {
                if (kill)
                        mss->list.next = NULL;
                wake_up_process(mss->tsk);
        }
}

static void expunge_all(struct msg_queue *msq, int res)
{
        struct msg_receiver *msr, *t;

        list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
                msr->r_msg = NULL; /* initialize expunge ordering */
                wake_up_process(msr->r_tsk);
                /*
                 * Ensure that the wakeup is visible before setting r_msg as
                 * the receiving end depends on it: either spinning on a nil,
                 * or dealing with -EAGAIN cases. See lockless receive part 1
                 * and 2 in do_msgrcv().
                 */
                smp_mb();
                msr->r_msg = ERR_PTR(res);
        }
}

/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rwsem (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rwsem remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct msg_msg *msg, *t;
        struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

        expunge_all(msq, -EIDRM);
        ss_wakeup(&msq->q_senders, 1);
        msg_rmid(ns, msq);
        ipc_unlock_object(&msq->q_perm);
        rcu_read_unlock();

        list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
                atomic_dec(&ns->msg_hdrs);
                free_msg(msg);
        }
        atomic_sub(msq->q_cbytes, &ns->msg_bytes);
        ipc_rcu_putref(msq, msg_rcu_free);
}

/*
 * Called with msg_ids.rwsem and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
        struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

        return security_msg_queue_associate(msq, msgflg);
}

SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
        struct ipc_namespace *ns;
        static const struct ipc_ops msg_ops = {
                .getnew = newque,
                .associate = msg_security,
        };
        struct ipc_params msg_params;

        ns = current->nsproxy->ipc_ns;

        msg_params.key = key;
        msg_params.flg = msgflg;

        return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}

static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct msqid_ds out;

                memset(&out, 0, sizeof(out));

                ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

                out.msg_stime = in->msg_stime;
                out.msg_rtime = in->msg_rtime;
                out.msg_ctime = in->msg_ctime;

                if (in->msg_cbytes > USHRT_MAX)
                        out.msg_cbytes = USHRT_MAX;
                else
                        out.msg_cbytes = in->msg_cbytes;
                out.msg_lcbytes = in->msg_cbytes;

                if (in->msg_qnum > USHRT_MAX)
                        out.msg_qnum = USHRT_MAX;
                else
                        out.msg_qnum = in->msg_qnum;

                if (in->msg_qbytes > USHRT_MAX)
                        out.msg_qbytes = USHRT_MAX;
                else
                        out.msg_qbytes = in->msg_qbytes;
                out.msg_lqbytes = in->msg_qbytes;

                out.msg_lspid = in->msg_lspid;
                out.msg_lrpid = in->msg_lrpid;

                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}

static inline unsigned long
copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
                if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
                return 0;
        case IPC_OLD:
        {
                struct msqid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->msg_perm.uid = tbuf_old.msg_perm.uid;
                out->msg_perm.gid = tbuf_old.msg_perm.gid;
                out->msg_perm.mode = tbuf_old.msg_perm.mode;

                if (tbuf_old.msg_qbytes == 0)
                        out->msg_qbytes = tbuf_old.msg_lqbytes;
                else
                        out->msg_qbytes = tbuf_old.msg_qbytes;

                return 0;
        }
        default:
                return -EINVAL;
        }
}

/*
 * This function handles some msgctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
                       struct msqid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct msqid64_ds uninitialized_var(msqid64);
        struct msg_queue *msq;
        int err;

        if (cmd == IPC_SET) {
                if (copy_msqid_from_user(&msqid64, buf, version))
                        return -EFAULT;
        }

        down_write(&msg_ids(ns).rwsem);
        rcu_read_lock();

        ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd,
                                      &msqid64.msg_perm, msqid64.msg_qbytes);
        if (IS_ERR(ipcp)) {
                err = PTR_ERR(ipcp);
                goto out_unlock1;
        }

        msq = container_of(ipcp, struct msg_queue, q_perm);

        err = security_msg_queue_msgctl(msq, cmd);
        if (err)
                goto out_unlock1;

        switch (cmd) {
        case IPC_RMID:
                ipc_lock_object(&msq->q_perm);
                /* freeque unlocks the ipc object and rcu */
                freeque(ns, ipcp);
                goto out_up;
        case IPC_SET:
                if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
                    !capable(CAP_SYS_RESOURCE)) {
                        err = -EPERM;
                        goto out_unlock1;
                }

                ipc_lock_object(&msq->q_perm);
                err = ipc_update_perm(&msqid64.msg_perm, ipcp);
                if (err)
                        goto out_unlock0;

                msq->q_qbytes = msqid64.msg_qbytes;

                msq->q_ctime = get_seconds();
                /* sleeping receivers might be excluded by
                 * stricter permissions.
                 */
                expunge_all(msq, -EAGAIN);
                /* sleeping senders might be able to send
                 * due to a larger queue size.
                 */
                ss_wakeup(&msq->q_senders, 0);
                break;
        default:
                err = -EINVAL;
                goto out_unlock1;
        }

out_unlock0:
        ipc_unlock_object(&msq->q_perm);
out_unlock1:
        rcu_read_unlock();
out_up:
        up_write(&msg_ids(ns).rwsem);
        return err;
}

static int msgctl_nolock(struct ipc_namespace *ns, int msqid,
                         int cmd, int version, void __user *buf)
{
        int err;
        struct msg_queue *msq;

        switch (cmd) {
        case IPC_INFO:
        case MSG_INFO:
        {
                struct msginfo msginfo;
                int max_id;

                if (!buf)
                        return -EFAULT;

                /*
                 * We must not return kernel stack data:
                 * due to padding, it is not enough
                 * to set all member fields.
                 */
                err = security_msg_queue_msgctl(NULL, cmd);
                if (err)
                        return err;

                memset(&msginfo, 0, sizeof(msginfo));
                msginfo.msgmni = ns->msg_ctlmni;
                msginfo.msgmax = ns->msg_ctlmax;
                msginfo.msgmnb = ns->msg_ctlmnb;
                msginfo.msgssz = MSGSSZ;
                msginfo.msgseg = MSGSEG;
                down_read(&msg_ids(ns).rwsem);
                if (cmd == MSG_INFO) {
                        msginfo.msgpool = msg_ids(ns).in_use;
                        msginfo.msgmap = atomic_read(&ns->msg_hdrs);
                        msginfo.msgtql = atomic_read(&ns->msg_bytes);
                } else {
                        msginfo.msgmap = MSGMAP;
                        msginfo.msgpool = MSGPOOL;
                        msginfo.msgtql = MSGTQL;
                }
                max_id = ipc_get_maxid(&msg_ids(ns));
                up_read(&msg_ids(ns).rwsem);
                if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
                        return -EFAULT;
                return (max_id < 0) ? 0 : max_id;
        }
        case MSG_STAT:
        case IPC_STAT:
        {
                struct msqid64_ds tbuf;
                int success_return;

                if (!buf)
                        return -EFAULT;

                memset(&tbuf, 0, sizeof(tbuf));

                rcu_read_lock();
                if (cmd == MSG_STAT) {
                        msq = msq_obtain_object(ns, msqid);
                        if (IS_ERR(msq)) {
                                err = PTR_ERR(msq);
                                goto out_unlock;
                        }
                        success_return = msq->q_perm.id;
                } else {
                        msq = msq_obtain_object_check(ns, msqid);
                        if (IS_ERR(msq)) {
                                err = PTR_ERR(msq);
                                goto out_unlock;
                        }
                        success_return = 0;
                }

                err = -EACCES;
                if (ipcperms(ns, &msq->q_perm, S_IRUGO))
                        goto out_unlock;

                err = security_msg_queue_msgctl(msq, cmd);
                if (err)
                        goto out_unlock;

                kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
                tbuf.msg_stime = msq->q_stime;
                tbuf.msg_rtime = msq->q_rtime;
                tbuf.msg_ctime = msq->q_ctime;
                tbuf.msg_cbytes = msq->q_cbytes;
                tbuf.msg_qnum = msq->q_qnum;
                tbuf.msg_qbytes = msq->q_qbytes;
                tbuf.msg_lspid = msq->q_lspid;
                tbuf.msg_lrpid = msq->q_lrpid;
                rcu_read_unlock();

                if (copy_msqid_to_user(buf, &tbuf, version))
                        return -EFAULT;
                return success_return;
        }

        default:
                return -EINVAL;
        }

        return err;
out_unlock:
        rcu_read_unlock();
        return err;
}

SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
        int version;
        struct ipc_namespace *ns;

        if (msqid < 0 || cmd < 0)
                return -EINVAL;

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) {
        case IPC_INFO:
        case MSG_INFO:
        case MSG_STAT:  /* msqid is an index rather than a msg queue id */
        case IPC_STAT:
                return msgctl_nolock(ns, msqid, cmd, version, buf);
        case IPC_SET:
        case IPC_RMID:
                return msgctl_down(ns, msqid, cmd, buf, version);
        default:
                return -EINVAL;
        }
}

static int testmsg(struct msg_msg *msg, long type, int mode)
{
        switch (mode) {
        case SEARCH_ANY:
        case SEARCH_NUMBER:
                return 1;
        case SEARCH_LESSEQUAL:
                if (msg->m_type <= type)
                        return 1;
                break;
        case SEARCH_EQUAL:
                if (msg->m_type == type)
                        return 1;
                break;
        case SEARCH_NOTEQUAL:
                if (msg->m_type != type)
                        return 1;
                break;
        }
        return 0;
}
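
/*
 * Worked example (illustrative): with messages of types {3, 1, 5}
 * queued in that order, SEARCH_EQUAL with type 5 matches only the
 * third message, while SEARCH_LESSEQUAL with type 4 matches the first
 * two; find_msg() below then returns the matching message with the
 * lowest type (here the type-1 message).
 */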

static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
        struct msg_receiver *msr, *t;

        list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
                if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
                    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
                                               msr->r_msgtype, msr->r_mode)) {

                        list_del(&msr->r_list);
                        if (msr->r_maxsize < msg->m_ts) {
                                /* initialize pipelined send ordering */
                                msr->r_msg = NULL;
                                wake_up_process(msr->r_tsk);
                                smp_mb(); /* see barrier comment below */
                                msr->r_msg = ERR_PTR(-E2BIG);
                        } else {
                                msr->r_msg = NULL;
                                msq->q_lrpid = task_pid_vnr(msr->r_tsk);
                                msq->q_rtime = get_seconds();
                                wake_up_process(msr->r_tsk);
                                /*
                                 * Ensure that the wakeup is visible before
                                 * setting r_msg, as the receiving end depends
                                 * on it. See lockless receive part 1 and 2 in
                                 * do_msgrcv().
                                 */
                                smp_mb();
                                msr->r_msg = msg;

                                return 1;
                        }
                }
        }

        return 0;
}

long do_msgsnd(int msqid, long mtype, void __user *mtext,
               size_t msgsz, int msgflg)
{
        struct msg_queue *msq;
        struct msg_msg *msg;
        int err;
        struct ipc_namespace *ns;

        ns = current->nsproxy->ipc_ns;

        if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
                return -EINVAL;
        if (mtype < 1)
                return -EINVAL;

        msg = load_msg(mtext, msgsz);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->m_type = mtype;
        msg->m_ts = msgsz;

        rcu_read_lock();
        msq = msq_obtain_object_check(ns, msqid);
        if (IS_ERR(msq)) {
                err = PTR_ERR(msq);
                goto out_unlock1;
        }

        ipc_lock_object(&msq->q_perm);

        for (;;) {
                struct msg_sender s;

                err = -EACCES;
                if (ipcperms(ns, &msq->q_perm, S_IWUGO))
                        goto out_unlock0;

                /* raced with RMID? */
                if (!ipc_valid_object(&msq->q_perm)) {
                        err = -EIDRM;
                        goto out_unlock0;
                }

                err = security_msg_queue_msgsnd(msq, msg, msgflg);
                if (err)
                        goto out_unlock0;

                if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
                    1 + msq->q_qnum <= msq->q_qbytes) {
                        break;
                }

                /* queue full, wait: */
                if (msgflg & IPC_NOWAIT) {
                        err = -EAGAIN;
                        goto out_unlock0;
                }

                /* enqueue the sender and prepare to block */
                ss_add(msq, &s);

                if (!ipc_rcu_getref(msq)) {
                        err = -EIDRM;
                        goto out_unlock0;
                }

                ipc_unlock_object(&msq->q_perm);
                rcu_read_unlock();
                schedule();

                rcu_read_lock();
                ipc_lock_object(&msq->q_perm);

                ipc_rcu_putref(msq, ipc_rcu_free);
                /* raced with RMID? */
                if (!ipc_valid_object(&msq->q_perm)) {
                        err = -EIDRM;
                        goto out_unlock0;
                }

                ss_del(&s);

                if (signal_pending(current)) {
                        err = -ERESTARTNOHAND;
                        goto out_unlock0;
                }
        }

        msq->q_lspid = task_tgid_vnr(current);
        msq->q_stime = get_seconds();

        if (!pipelined_send(msq, msg)) {
                /* no one is waiting for this message, enqueue it */
                list_add_tail(&msg->m_list, &msq->q_messages);
                msq->q_cbytes += msgsz;
                msq->q_qnum++;
                atomic_add(msgsz, &ns->msg_bytes);
                atomic_inc(&ns->msg_hdrs);
        }

        err = 0;
        msg = NULL;

out_unlock0:
        ipc_unlock_object(&msq->q_perm);
out_unlock1:
        rcu_read_unlock();
        if (msg != NULL)
                free_msg(msg);
        return err;
}

SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
                int, msgflg)
{
        long mtype;

        if (get_user(mtype, &msgp->mtype))
                return -EFAULT;
        return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}

static inline int convert_mode(long *msgtyp, int msgflg)
{
        if (msgflg & MSG_COPY)
                return SEARCH_NUMBER;
        /*
         * find message of correct type.
         * msgtyp = 0 => get first.
         * msgtyp > 0 => get first message of matching type.
         * msgtyp < 0 => get message with the lowest type <= abs(msgtyp).
         */
        if (*msgtyp == 0)
                return SEARCH_ANY;
        if (*msgtyp < 0) {
                *msgtyp = -*msgtyp;
                return SEARCH_LESSEQUAL;
        }
        if (msgflg & MSG_EXCEPT)
                return SEARCH_NOTEQUAL;
        return SEARCH_EQUAL;
}
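
/*
 * Mapping examples (illustrative): msgtyp == 0 yields SEARCH_ANY
 * (first message); msgtyp == 4 yields SEARCH_EQUAL, or SEARCH_NOTEQUAL
 * if MSG_EXCEPT is set; msgtyp == -4 is rewritten to 4 and yields
 * SEARCH_LESSEQUAL (lowest type <= 4); MSG_COPY makes msgtyp an index
 * into the queue via SEARCH_NUMBER.
 */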

static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
        struct msgbuf __user *msgp = dest;
        size_t msgsz;

        if (put_user(msg->m_type, &msgp->mtype))
                return -EFAULT;

        msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
        if (store_msg(msgp->mtext, msg, msgsz))
                return -EFAULT;
        return msgsz;
}
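
/*
 * Truncation sketch (illustrative): receiving a 100-byte message into
 * a 64-byte buffer fails in do_msgrcv() with -E2BIG unless MSG_NOERROR
 * is set, in which case do_msg_fill() silently copies only the first
 * 64 bytes and returns 64.
 */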

#ifdef CONFIG_CHECKPOINT_RESTORE
/*
 * This function creates a new kernel message structure, large enough
 * to store bufsz message bytes.
 */
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
        struct msg_msg *copy;

        /*
         * Create dummy message to copy real message to.
         */
        copy = load_msg(buf, bufsz);
        if (!IS_ERR(copy))
                copy->m_ts = bufsz;
        return copy;
}

static inline void free_copy(struct msg_msg *copy)
{
        if (copy)
                free_msg(copy);
}
#else
static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
        return ERR_PTR(-ENOSYS);
}

static inline void free_copy(struct msg_msg *copy)
{
}
#endif
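
/*
 * MSG_COPY sketch (illustrative, CONFIG_CHECKPOINT_RESTORE only):
 * checkpoint tools such as CRIU peek at the n-th message without
 * dequeueing it, roughly:
 *
 *      msgrcv(id, &m, sizeof(m.mtext), n, MSG_COPY | IPC_NOWAIT);
 *
 * do_msgrcv() below rejects MSG_COPY combined with MSG_EXCEPT or
 * without IPC_NOWAIT.
 */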

static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
        struct msg_msg *msg, *found = NULL;
        long count = 0;

        list_for_each_entry(msg, &msq->q_messages, m_list) {
                if (testmsg(msg, *msgtyp, mode) &&
                    !security_msg_queue_msgrcv(msq, msg, current,
                                               *msgtyp, mode)) {
                        if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
                                *msgtyp = msg->m_type - 1;
                                found = msg;
                        } else if (mode == SEARCH_NUMBER) {
                                if (*msgtyp == count)
                                        return msg;
                        } else
                                return msg;
                        count++;
                }
        }

        return found ?: ERR_PTR(-EAGAIN);
}

long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
               long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
        int mode;
        struct msg_queue *msq;
        struct ipc_namespace *ns;
        struct msg_msg *msg, *copy = NULL;

        ns = current->nsproxy->ipc_ns;

        if (msqid < 0 || (long) bufsz < 0)
                return -EINVAL;

        if (msgflg & MSG_COPY) {
                if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
                        return -EINVAL;
                copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
                if (IS_ERR(copy))
                        return PTR_ERR(copy);
        }
        mode = convert_mode(&msgtyp, msgflg);

        rcu_read_lock();
        msq = msq_obtain_object_check(ns, msqid);
        if (IS_ERR(msq)) {
                rcu_read_unlock();
                free_copy(copy);
                return PTR_ERR(msq);
        }

        for (;;) {
                struct msg_receiver msr_d;

                msg = ERR_PTR(-EACCES);
                if (ipcperms(ns, &msq->q_perm, S_IRUGO))
                        goto out_unlock1;

                ipc_lock_object(&msq->q_perm);

                /* raced with RMID? */
                if (!ipc_valid_object(&msq->q_perm)) {
                        msg = ERR_PTR(-EIDRM);
                        goto out_unlock0;
                }

                msg = find_msg(msq, &msgtyp, mode);
                if (!IS_ERR(msg)) {
                        /*
                         * Found a suitable message.
                         * Unlink it from the queue.
                         */
                        if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
                                msg = ERR_PTR(-E2BIG);
                                goto out_unlock0;
                        }
                        /*
                         * If we are copying, then do not unlink message and do
                         * not update queue parameters.
                         */
                        if (msgflg & MSG_COPY) {
                                msg = copy_msg(msg, copy);
                                goto out_unlock0;
                        }

                        list_del(&msg->m_list);
                        msq->q_qnum--;
                        msq->q_rtime = get_seconds();
                        msq->q_lrpid = task_tgid_vnr(current);
                        msq->q_cbytes -= msg->m_ts;
                        atomic_sub(msg->m_ts, &ns->msg_bytes);
                        atomic_dec(&ns->msg_hdrs);
                        ss_wakeup(&msq->q_senders, 0);

                        goto out_unlock0;
                }

                /* No message waiting. Wait for a message */
                if (msgflg & IPC_NOWAIT) {
                        msg = ERR_PTR(-ENOMSG);
                        goto out_unlock0;
                }

                list_add_tail(&msr_d.r_list, &msq->q_receivers);
                msr_d.r_tsk = current;
                msr_d.r_msgtype = msgtyp;
                msr_d.r_mode = mode;
                if (msgflg & MSG_NOERROR)
                        msr_d.r_maxsize = INT_MAX;
                else
                        msr_d.r_maxsize = bufsz;
                msr_d.r_msg = ERR_PTR(-EAGAIN);
                __set_current_state(TASK_INTERRUPTIBLE);

                ipc_unlock_object(&msq->q_perm);
                rcu_read_unlock();
                schedule();

                /* Lockless receive, part 1:
                 * Disable preemption. We don't hold a reference to the queue
                 * and getting a reference would defeat the idea of a lockless
                 * operation, thus the code relies on rcu to guarantee the
                 * existence of msq:
                 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
                 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
                 * rcu_read_lock() prevents preemption between reading r_msg
                 * and acquiring the q_perm.lock in ipc_lock_object().
                 */
                rcu_read_lock();

                /* Lockless receive, part 2:
                 * Wait until pipelined_send or expunge_all are outside of
                 * wake_up_process(). There is a race with exit(), see
                 * ipc/mqueue.c for the details.
                 */
                msg = (struct msg_msg *)msr_d.r_msg;
                while (msg == NULL) {
                        cpu_relax();
                        msg = (struct msg_msg *)msr_d.r_msg;
                }

                /* Lockless receive, part 3:
                 * If there is a message or an error then accept it without
                 * locking.
                 */
                if (msg != ERR_PTR(-EAGAIN))
                        goto out_unlock1;

                /* Lockless receive, part 4:
                 * Acquire the queue spinlock.
                 */
                ipc_lock_object(&msq->q_perm);

                /* Lockless receive, part 5:
                 * Repeat the test after acquiring the spinlock.
                 */
                msg = (struct msg_msg *)msr_d.r_msg;
                if (msg != ERR_PTR(-EAGAIN))
                        goto out_unlock0;

                list_del(&msr_d.r_list);
                if (signal_pending(current)) {
                        msg = ERR_PTR(-ERESTARTNOHAND);
                        goto out_unlock0;
                }

                ipc_unlock_object(&msq->q_perm);
        }

out_unlock0:
        ipc_unlock_object(&msq->q_perm);
out_unlock1:
        rcu_read_unlock();
        if (IS_ERR(msg)) {
                free_copy(copy);
                return PTR_ERR(msg);
        }

        bufsz = msg_handler(buf, msg, bufsz);
        free_msg(msg);

        return bufsz;
}

SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
                long, msgtyp, int, msgflg)
{
        return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
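
/*
 * Illustrative userspace counterpart to the msgsnd sketch above:
 *
 *      struct { long mtype; char mtext[64]; } m;
 *      ssize_t n = msgrcv(id, &m, sizeof(m.mtext), 1, 0);
 *
 * This blocks until a type-1 message arrives and returns the number of
 * mtext bytes copied (at most 64 here).
 */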

/*
 * Scale msgmni with the available lowmem size: the memory dedicated to msg
 * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
 * Also take into account the number of nsproxies created so far.
 * This should be done staying within the [MSGMNI, IPCMNI/nr_ipc_ns] range.
 */
void recompute_msgmni(struct ipc_namespace *ns)
{
        struct sysinfo i;
        unsigned long allowed;
        int nb_ns;

        si_meminfo(&i);
        allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
                / MSGMNB;
        nb_ns = atomic_read(&nr_ipc_ns);
        allowed /= nb_ns;

        if (allowed < MSGMNI) {
                ns->msg_ctlmni = MSGMNI;
                return;
        }

        if (allowed > IPCMNI / nb_ns) {
                ns->msg_ctlmni = IPCMNI / nb_ns;
                return;
        }

        ns->msg_ctlmni = allowed;
}
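
/*
 * Worked example (illustrative, assuming the usual defaults
 * MSG_MEM_SCALE = 32, MSGMNB = 16384, MSGMNI = 32, IPCMNI = 32768):
 * with 4 GiB of lowmem and a single ipc namespace,
 * allowed = (4 GiB / 32) / 16384 = 8192, which lies within
 * [MSGMNI, IPCMNI], so msg_ctlmni becomes 8192.
 */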

void msg_init_ns(struct ipc_namespace *ns)
{
        ns->msg_ctlmax = MSGMAX;
        ns->msg_ctlmnb = MSGMNB;

        recompute_msgmni(ns);

        atomic_set(&ns->msg_bytes, 0);
        atomic_set(&ns->msg_hdrs, 0);
        ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}

#ifdef CONFIG_IPC_NS
void msg_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &msg_ids(ns), freeque);
        idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
#endif

#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
        struct user_namespace *user_ns = seq_user_ns(s);
        struct msg_queue *msq = it;

        return seq_printf(s,
                          "%10d %10d  %4o  %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
                          msq->q_perm.key,
                          msq->q_perm.id,
                          msq->q_perm.mode,
                          msq->q_cbytes,
                          msq->q_qnum,
                          msq->q_lspid,
                          msq->q_lrpid,
                          from_kuid_munged(user_ns, msq->q_perm.uid),
                          from_kgid_munged(user_ns, msq->q_perm.gid),
                          from_kuid_munged(user_ns, msq->q_perm.cuid),
                          from_kgid_munged(user_ns, msq->q_perm.cgid),
                          msq->q_stime,
                          msq->q_rtime,
                          msq->q_ctime);
}
#endif

void __init msg_init(void)
{
        msg_init_ns(&init_ipc_ns);

        printk(KERN_INFO "msgmni has been set to %d\n",
               init_ipc_ns.msg_ctlmni);

        ipc_init_proc_interface("sysvipc/msg",
                                "       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
                                IPC_MSG_IDS, sysvipc_msg_proc_show);
}