/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/signal.h>
#include <linux/sched/user.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_READY	1

struct posix_msg_tree_node {
	struct rb_node		rb_node;
	struct list_head	msg_list;
	int			priority;
};
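
/*
 * A quick sketch of the queue layout (grounded in msg_insert() and msg_get()
 * below): info->msg_tree is an rbtree keyed by message priority, and each
 * posix_msg_tree_node holds a FIFO list of the messages queued at that
 * priority.  Sends append to the tail of the matching node's list; receives
 * walk to the rightmost (highest-priority) node and pop the list head.
 */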

struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct rb_root msg_tree;
	struct posix_msg_tree_node *node_cache;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_namespace *notify_user_ns;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
	return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
	struct ipc_namespace *ns;

	spin_lock(&mq_lock);
	ns = __get_ns_from_inode(inode);
	spin_unlock(&mq_lock);

	return ns;
}

/* Auxiliary functions to manipulate messages' list */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;

	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

		if (likely(leaf->priority == msg->m_type))
			goto insert_msg;
		else if (msg->m_type < leaf->priority)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	if (info->node_cache) {
		leaf = info->node_cache;
		info->node_cache = NULL;
	} else {
		leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
		if (!leaf)
			return -ENOMEM;
		INIT_LIST_HEAD(&leaf->msg_list);
	}
	leaf->priority = msg->m_type;
	rb_link_node(&leaf->rb_node, parent, p);
	rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
	info->attr.mq_curmsgs++;
	info->qsize += msg->m_ts;
	list_add_tail(&msg->m_list, &leaf->msg_list);
	return 0;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	struct rb_node **p, *parent = NULL;
	struct posix_msg_tree_node *leaf;
	struct msg_msg *msg;

try_again:
	p = &info->msg_tree.rb_node;
	while (*p) {
		parent = *p;
		/*
		 * During insert, low priorities go to the left and high to
		 * the right.  On receive, we want the highest priorities
		 * first, so walk all the way to the right.
		 */
		p = &(*p)->rb_right;
	}
	if (!parent) {
		if (info->attr.mq_curmsgs) {
			pr_warn_once("Inconsistency in POSIX message queue, "
				     "no tree element, but supposedly messages "
				     "should exist!\n");
			info->attr.mq_curmsgs = 0;
		}
		return NULL;
	}
	leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
	if (unlikely(list_empty(&leaf->msg_list))) {
		pr_warn_once("Inconsistency in POSIX message queue, "
			     "empty leaf node but we haven't implemented "
			     "lazy leaf delete!\n");
		rb_erase(&leaf->rb_node, &info->msg_tree);
		if (info->node_cache) {
			kfree(leaf);
		} else {
			info->node_cache = leaf;
		}
		goto try_again;
	} else {
		msg = list_first_entry(&leaf->msg_list,
				       struct msg_msg, m_list);
		list_del(&msg->m_list);
		if (list_empty(&leaf->msg_list)) {
			rb_erase(&leaf->rb_node, &info->msg_tree);
			if (info->node_cache) {
				kfree(leaf);
			} else {
				info->node_cache = leaf;
			}
		}
	}
	info->attr.mq_curmsgs--;
	info->qsize -= msg->m_ts;
	return msg;
}
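
/*
 * Example of the resulting ordering (a sketch, not kernel code): if a queue
 * receives sends with priorities 1, 5, 1, 5 in that order, msg_get() above
 * returns them as 5, 5, 1, 1 -- highest priority first, FIFO within each
 * priority.
 */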

static struct inode *mqueue_get_inode(struct super_block *sb,
		struct ipc_namespace *ipc_ns, umode_t mode,
		struct mq_attr *attr)
{
	struct user_struct *u = current_user();
	struct inode *inode;
	int ret = -ENOMEM;

	inode = new_inode(sb);
	if (!inode)
		goto err;

	inode->i_ino = get_next_ino();
	inode->i_mode = mode;
	inode->i_uid = current_fsuid();
	inode->i_gid = current_fsgid();
	inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);

	if (S_ISREG(mode)) {
		struct mqueue_inode_info *info;
		unsigned long mq_bytes, mq_treesize;

		inode->i_fop = &mqueue_file_operations;
		inode->i_size = FILENT_SIZE;
		/* mqueue specific info */
		info = MQUEUE_I(inode);
		spin_lock_init(&info->lock);
		init_waitqueue_head(&info->wait_q);
		INIT_LIST_HEAD(&info->e_wait_q[0].list);
		INIT_LIST_HEAD(&info->e_wait_q[1].list);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
		info->qsize = 0;
		info->user = NULL;	/* set when all is ok */
		info->msg_tree = RB_ROOT;
		info->node_cache = NULL;
		memset(&info->attr, 0, sizeof(info->attr));
		info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					   ipc_ns->mq_msg_default);
		info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					    ipc_ns->mq_msgsize_default);
		if (attr) {
			info->attr.mq_maxmsg = attr->mq_maxmsg;
			info->attr.mq_msgsize = attr->mq_msgsize;
		}
		/*
		 * We used to allocate a static array of pointers and account
		 * the size of that array as well as one msg_msg struct per
		 * possible message into the queue size. That's no longer
		 * accurate as the queue is now an rbtree and will grow and
		 * shrink depending on usage patterns.  We can, however, still
		 * account one msg_msg struct per message, but the nodes are
		 * allocated depending on priority usage, and most programs
		 * only use one, or a handful, of priorities.  However, since
		 * this is pinned memory, we need to assume worst case, so
		 * that means the min(mq_maxmsg, max_priorities) * struct
		 * posix_msg_tree_node.
		 */
		mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
			min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
			sizeof(struct posix_msg_tree_node);

		mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
					  info->attr.mq_msgsize);
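
		/*
		 * Worked example (a sketch; the struct sizes are illustrative
		 * assumptions, not exact for any particular arch/config):
		 * with mq_maxmsg = 10 and mq_msgsize = 8192, and assuming
		 * sizeof(struct msg_msg) == 48 and
		 * sizeof(struct posix_msg_tree_node) == 40, we pin
		 * mq_treesize = 10*48 + 10*40 = 880 bytes and
		 * mq_bytes = 880 + 10*8192 = 82800 bytes, all charged
		 * against RLIMIT_MSGQUEUE below.
		 */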

		spin_lock(&mq_lock);
		if (u->mq_bytes + mq_bytes < u->mq_bytes ||
		    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
			spin_unlock(&mq_lock);
			/* mqueue_evict_inode() releases info->messages */
			ret = -EMFILE;
			goto out_inode;
		}
		u->mq_bytes += mq_bytes;
		spin_unlock(&mq_lock);

		/* all is ok */
		info->user = get_uid(u);
	} else if (S_ISDIR(mode)) {
		inc_nlink(inode);
		/* Some things misbehave if size == 0 on a directory */
		inode->i_size = 2 * DIRENT_SIZE;
		inode->i_op = &mqueue_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
	}

	return inode;
out_inode:
	iput(inode);
err:
	return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct ipc_namespace *ns = sb->s_fs_info;

	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	sb->s_root = d_make_root(inode);
	if (!sb->s_root)
		return -ENOMEM;
	return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data)
{
	struct ipc_namespace *ns;

	if (flags & MS_KERNMOUNT) {
		ns = data;
		data = NULL;
	} else {
		ns = current->nsproxy->ipc_ns;
	}
	return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super);
}

static void init_once(void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes, mq_treesize;
	struct ipc_namespace *ipc_ns;
	struct msg_msg *msg;

	clear_inode(inode);

	if (S_ISDIR(inode->i_mode))
		return;

	ipc_ns = get_ns_from_inode(inode);
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	while ((msg = msg_get(info)) != NULL)
		free_msg(msg);
	kfree(info->node_cache);
	spin_unlock(&info->lock);

	/* Total amount of bytes accounted for the mqueue */
	mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);

	mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
				  info->attr.mq_msgsize);

	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		/*
		 * get_ns_from_inode() ensures that the
		 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
		 * to which we now hold a reference, or it is NULL.
		 * We can't put it here under mq_lock, though.
		 */
		if (ipc_ns)
			ipc_ns->mq_queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				umode_t mode, bool excl)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;
	struct ipc_namespace *ipc_ns;

	spin_lock(&mq_lock);
	ipc_ns = __get_ns_from_inode(dir);
	if (!ipc_ns) {
		error = -EACCES;
		goto out_unlock;
	}

	if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
	    !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_unlock;
	}
	ipc_ns->mq_queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		spin_lock(&mq_lock);
		ipc_ns->mq_queues_count--;
		goto out_unlock;
	}

	put_ipc_ns(ipc_ns);
	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_unlock:
	spin_unlock(&mq_lock);
	if (ipc_ns)
		put_ipc_ns(ipc_ns);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);

	dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This routine backs read(2) on a queue file.  To avoid reimplementing
 * some form of mq_receive here, we expose only the queue size and the
 * notification info (the only values that are interesting from the user's
 * point of view and aren't accessible through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	char buffer[FILENT_SIZE];
	ssize_t ret;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_vnr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';

	ret = simple_read_from_buffer(u_data, count, off, buffer,
				strlen(buffer));
	if (ret <= 0)
		return ret;

	file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
	return ret;
}
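
/*
 * Example (a userspace sketch, assuming mqueuefs is mounted at the
 * conventional /dev/mqueue): reading a queue file shows the fields
 * formatted above, e.g.
 *
 *	$ cat /dev/mqueue/myqueue
 *	QSIZE:6          NOTIFY:0     SIGNO:0     NOTIFY_PID:0
 */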

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);

	spin_unlock(&info->lock);
	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}
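
/*
 * Example (userspace sketch, not part of this file): because a mqd_t is a
 * file descriptor on Linux, a queue can be polled for readability and
 * writability via the hook above ("mqdes" is an assumed, already-open
 * descriptor):
 *
 *	struct pollfd pfd = { .fd = (int)mqdes, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
 *		... a message can now be received without blocking ...
 */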

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * lock isn't held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
		    ktime_t *timeout, struct ext_wait_queue *ewp)
	__releases(&info->lock)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_hrtimeout_range_clock(timeout, 0,
			HRTIMER_MODE_ABS, CLOCK_REALTIME);
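
		/*
		 * ewp->state is flipped to STATE_READY by pipelined_send()/
		 * pipelined_receive() and, once set, never changes again, so
		 * a lockless check here is safe and avoids retaking the lock
		 * on the common wakeup path; the racy case is resolved by
		 * the second check under the lock (see the "Pipelined send
		 * and receive" comment block below).
		 */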
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * The next function exists only to split up the overly long
 * sys_mq_timedsend.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/*
	 * Notification is invoked when a process has registered for it,
	 * no process is waiting synchronously for a message, AND the
	 * queue's state changed from empty to not empty.  Here we can be
	 * sure that no one is waiting synchronously.
	 */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			/* sends signal */

			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			/* map current pid/uid into info->owner's namespaces */
			rcu_read_lock();
			sig_i.si_pid = task_tgid_nr_ns(current,
						ns_of_pid(info->notify_owner));
			sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
			rcu_read_unlock();

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		put_user_ns(info->notify_user_ns);
		info->notify_owner = NULL;
		info->notify_user_ns = NULL;
	}
	wake_up(&info->wait_q);
}

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
			   struct timespec *ts)
{
	if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;
	return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	put_user_ns(info->notify_user_ns);
	info->notify_owner = NULL;
	info->notify_user_ns = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
	int mq_treesize;
	unsigned long total_size;

	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return -EINVAL;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX ||
		    attr->mq_msgsize > HARD_MSGSIZEMAX)
			return -EINVAL;
	} else {
		if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
				attr->mq_msgsize > ipc_ns->mq_msgsize_max)
			return -EINVAL;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return -EOVERFLOW;
	mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
		min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
		sizeof(struct posix_msg_tree_node);
	total_size = attr->mq_maxmsg * attr->mq_msgsize;
	if (total_size + mq_treesize < total_size)
		return -EOVERFLOW;
	return 0;
}
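
/*
 * Example of what the overflow checks above catch (a sketch, assuming a
 * 64-bit unsigned long): with mq_maxmsg = 65536 and mq_msgsize = 1UL << 48,
 * the product mq_maxmsg * mq_msgsize is exactly 2^64 and would wrap to 0,
 * so the mq_msgsize > ULONG_MAX/mq_maxmsg test rejects it with -EOVERFLOW
 * before the bogus total ever reaches the accounting code.
 */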

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
			struct path *path, int oflag, umode_t mode,
			struct mq_attr *attr)
{
	const struct cred *cred = current_cred();
	int ret;

	if (attr) {
		ret = mq_attr_ok(ipc_ns, attr);
		if (ret)
			return ERR_PTR(ret);
		/* store for use during create */
		path->dentry->d_fsdata = attr;
	} else {
		struct mq_attr def_attr;

		def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
					 ipc_ns->mq_msg_default);
		def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
					  ipc_ns->mq_msgsize_default);
		ret = mq_attr_ok(ipc_ns, &def_attr);
		if (ret)
			return ERR_PTR(ret);
	}

	mode &= ~current_umask();
	ret = vfs_create(dir, path->dentry, mode, true);
	path->dentry->d_fsdata = NULL;
	if (ret)
		return ERR_PTR(ret);
	return dentry_open(path, oflag, cred);
}

/* Opens existing queue */
static struct file *do_open(struct path *path, int oflag)
{
	static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
						  MAY_READ | MAY_WRITE };
	int acc;

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
		return ERR_PTR(-EINVAL);
	acc = oflag2acc[oflag & O_ACCMODE];
	if (inode_permission(d_inode(path->dentry), acc))
		return ERR_PTR(-EACCES);
	return dentry_open(path, oflag, current_cred());
}

static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
		      struct mq_attr *attr)
{
	struct path path;
	struct file *filp;
	struct filename *name;
	int fd, error;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;
	struct dentry *root = mnt->mnt_root;
	int ro;

	audit_mq_open(oflag, mode, attr);

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		goto out_putname;

	ro = mnt_want_write(mnt);	/* we'll drop it in any case */
	error = 0;
	inode_lock(d_inode(root));
	path.dentry = lookup_one_len(name->name, root, strlen(name->name));
	if (IS_ERR(path.dentry)) {
		error = PTR_ERR(path.dentry);
		goto out_putfd;
	}
	path.mnt = mntget(mnt);

	if (oflag & O_CREAT) {
		if (d_really_is_positive(path.dentry)) {	/* entry already exists */
			audit_inode(name, path.dentry, 0);
			if (oflag & O_EXCL) {
				error = -EEXIST;
				goto out;
			}
			filp = do_open(&path, oflag);
		} else {
			if (ro) {
				error = ro;
				goto out;
			}
			audit_inode_parent_hidden(name, root);
			filp = do_create(ipc_ns, d_inode(root), &path,
					 oflag, mode, attr);
		}
	} else {
		if (d_really_is_negative(path.dentry)) {
			error = -ENOENT;
			goto out;
		}
		audit_inode(name, path.dentry, 0);
		filp = do_open(&path, oflag);
	}

	if (!IS_ERR(filp))
		fd_install(fd, filp);
	else
		error = PTR_ERR(filp);
out:
	path_put(&path);
out_putfd:
	if (error) {
		put_unused_fd(fd);
		fd = error;
	}
	inode_unlock(d_inode(root));
	if (!ro)
		mnt_drop_write(mnt);
out_putname:
	putname(name);
	return fd;
}

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
		struct mq_attr __user *, u_attr)
{
	struct mq_attr attr;

	if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
		return -EFAULT;

	return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
}
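
/*
 * Example (userspace sketch, not part of this file): creating a queue
 * through the glibc wrapper for the syscall above.  The name and attribute
 * values are illustrative assumptions:
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t q = mq_open("/myqueue", O_CREAT | O_RDWR, 0600, &attr);
 *	if (q == (mqd_t)-1)
 *		perror("mq_open");
 */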

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
	int err;
	struct filename *name;
	struct dentry *dentry;
	struct inode *inode = NULL;
	struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
	struct vfsmount *mnt = ipc_ns->mq_mnt;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	audit_inode_parent_hidden(name, mnt->mnt_root);
	err = mnt_want_write(mnt);
	if (err)
		goto out_name;
	inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
	dentry = lookup_one_len(name->name, mnt->mnt_root,
				strlen(name->name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	inode = d_inode(dentry);
	if (!inode) {
		err = -ENOENT;
	} else {
		ihold(inode);
		err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
	}
	dput(dentry);

out_unlock:
	inode_unlock(d_inode(mnt->mnt_root));
	if (inode)
		iput(inode);
	mnt_drop_write(mnt);
out_name:
	putname(name);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
				  struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	wake_q_add(wake_q, receiver->task);
	/*
	 * Rely on the implicit cmpxchg barrier from wake_q_add such
	 * that we can ensure that updating receiver->state is the last
	 * write operation: As once set, the receiver can continue,
	 * and if we don't have the reference count from the wake_q,
	 * yet, at that point we can later have a use-after-free
	 * condition and bogus wakeup.
	 */
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
 * its message and insert it into the queue (there is guaranteed to be room,
 * since a message was just removed).
 */
static inline void pipelined_receive(struct wake_q_head *wake_q,
				     struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	if (msg_insert(sender->msg, info))
		return;

	list_del(&sender->list);
	wake_q_add(wake_q, sender->task);
	sender->state = STATE_READY;
}

static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
		size_t msg_len, unsigned int msg_prio,
		struct timespec *ts)
{
	struct fd f;
	struct inode *inode;
	struct ext_wait_queue wait;
	struct ext_wait_queue *receiver;
	struct msg_msg *msg_ptr;
	struct mqueue_inode_info *info;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;
	int ret = 0;
	DEFINE_WAKE_Q(wake_q);

	if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
		return -EINVAL;

	if (ts) {
		expires = timespec_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
		ret = -EBADF;
		goto out_fput;
	}

	if (unlikely(msg_len > info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/* First try to allocate memory, before doing anything with
	 * existing queues. */
	msg_ptr = load_msg(u_msg_ptr, msg_len);
	if (IS_ERR(msg_ptr)) {
		ret = PTR_ERR(msg_ptr);
		goto out_fput;
	}
	msg_ptr->m_ts = msg_len;
	msg_ptr->m_type = msg_prio;

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
		new_leaf = NULL;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
		if (f.file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.msg = (void *) msg_ptr;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, SEND, timeout, &wait);
			/*
			 * wq_sleep must be called with info->lock held, and
			 * returns with the lock released
			 */
			goto out_free;
		}
	} else {
		receiver = wq_get_first_waiter(info, RECV);
		if (receiver) {
			pipelined_send(&wake_q, info, msg_ptr, receiver);
		} else {
			/* adds message to the queue */
			ret = msg_insert(msg_ptr, info);
			if (ret)
				goto out_unlock;
			__do_notify(info);
		}
		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);
	}
out_unlock:
	spin_unlock(&info->lock);
	wake_up_q(&wake_q);
out_free:
	if (ret)
		free_msg(msg_ptr);
out_fput:
	fdput(f);
out:
	return ret;
}
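
/*
 * Example (userspace sketch, not part of this file): the timeout passed to
 * mq_timedsend() is an absolute CLOCK_REALTIME time, matching the
 * HRTIMER_MODE_ABS/CLOCK_REALTIME sleep in wq_sleep() above.  "q", "buf",
 * "len" and "prio" are assumed to be set up by the caller:
 *
 *	struct timespec abs;
 *	clock_gettime(CLOCK_REALTIME, &abs);
 *	abs.tv_sec += 5;	(give up after roughly five seconds)
 *	if (mq_timedsend(q, buf, len, prio, &abs) == -1 &&
 *	    errno == ETIMEDOUT)
 *		... the queue stayed full for the whole interval ...
 */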

static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
		size_t msg_len, unsigned int __user *u_msg_prio,
		struct timespec *ts)
{
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;
	ktime_t expires, *timeout = NULL;
	struct posix_msg_tree_node *new_leaf = NULL;

	if (ts) {
		expires = timespec_to_ktime(*ts);
		timeout = &expires;
	}

	audit_mq_sendrecv(mqdes, msg_len, 0, ts);

	f = fdget(mqdes);
	if (unlikely(!f.file)) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);
	audit_file(f.file);

	if (unlikely(!(f.file->f_mode & FMODE_READ))) {
		ret = -EBADF;
		goto out_fput;
	}

	/* checks if buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	/*
	 * msg_insert really wants us to have a valid, spare node struct so
	 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
	 * fall back to that if necessary.
	 */
	if (!info->node_cache)
		new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

	spin_lock(&info->lock);

	if (!info->node_cache && new_leaf) {
		/* Save our speculative allocation into the cache */
		INIT_LIST_HEAD(&new_leaf->msg_list);
		info->node_cache = new_leaf;
	} else {
		kfree(new_leaf);
	}

	if (info->attr.mq_curmsgs == 0) {
		if (f.file->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
		} else {
			wait.task = current;
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		DEFINE_WAKE_Q(wake_q);

		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				current_time(inode);

		/* There is now free space in queue. */
		pipelined_receive(&wake_q, info);
		spin_unlock(&info->lock);
		wake_up_q(&wake_q);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
			store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fdput(f);
out:
	return ret;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int, msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct timespec ts, *p = NULL;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
		size_t, msg_len, unsigned int __user *, u_msg_prio,
		const struct timespec __user *, u_abs_timeout)
{
	struct timespec ts, *p = NULL;

	if (u_abs_timeout) {
		int res = prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

/*
 * Note: if the user asks us to deregister (by passing a NULL pointer) but
 * is not the current owner of the notification, the request is silently
 * discarded.  This case isn't explicitly defined in POSIX.
 */
static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
{
	int ret;
	struct fd f;
	struct sock *sock;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	audit_mq_notify(mqdes, notification);

	nc = NULL;
	sock = NULL;
	if (notification != NULL) {
		if (unlikely(notification->sigev_notify != SIGEV_NONE &&
			     notification->sigev_notify != SIGEV_SIGNAL &&
			     notification->sigev_notify != SIGEV_THREAD))
			return -EINVAL;
		if (notification->sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification->sigev_signo)) {
			return -EINVAL;
		}
		if (notification->sigev_notify == SIGEV_THREAD) {
			long timeo;

			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			if (!nc) {
				ret = -ENOMEM;
				goto out;
			}
			if (copy_from_user(nc->data,
					notification->sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				ret = -EFAULT;
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			f = fdget(notification->sigev_signo);
			if (!f.file) {
				ret = -EBADF;
				goto out;
			}
			sock = netlink_getsockbyfilp(f.file);
			fdput(f);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			timeo = MAX_SCHEDULE_TIMEOUT;
			ret = netlink_attachskb(sock, nc, &timeo, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	f = fdget(mqdes);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	inode = file_inode(f.file);
	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		ret = -EBADF;
		goto out_fput;
	}
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = current_time(inode);
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification->sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification->sigev_signo;
			info->notify.sigev_value = notification->sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		info->notify_user_ns = get_user_ns(current_user_ns());
		inode->i_atime = inode->i_ctime = current_time(inode);
	}
	spin_unlock(&info->lock);
out_fput:
	fdput(f);
out:
	if (sock)
		netlink_detachskb(sock, nc);
	else if (nc)
		dev_kfree_skb(nc);

	return ret;
}
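
/*
 * Example (userspace sketch, not part of this file): registering for
 * SIGEV_SIGNAL notification, which lands in the SIGEV_SIGNAL arm of
 * do_mq_notify() above.  The signal number is an illustrative choice:
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	if (mq_notify(q, &sev) == -1)
 *		perror("mq_notify");	(EBUSY if another process already
 *					 registered on this queue)
 */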

SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		const struct sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;

	if (u_notification) {
		if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
			return -EFAULT;
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
{
	struct fd f;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (new && (new->mq_flags & (~O_NONBLOCK)))
		return -EINVAL;

	f = fdget(mqdes);
	if (!f.file)
		return -EBADF;

	if (unlikely(f.file->f_op != &mqueue_file_operations)) {
		fdput(f);
		return -EBADF;
	}

	inode = file_inode(f.file);
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	if (old) {
		*old = info->attr;
		old->mq_flags = f.file->f_flags & O_NONBLOCK;
	}
	if (new) {
		audit_mq_getsetattr(mqdes, new);
		spin_lock(&f.file->f_lock);
		if (new->mq_flags & O_NONBLOCK)
			f.file->f_flags |= O_NONBLOCK;
		else
			f.file->f_flags &= ~O_NONBLOCK;
		spin_unlock(&f.file->f_lock);

		inode->i_atime = inode->i_ctime = current_time(inode);
	}

	spin_unlock(&info->lock);
	fdput(f);
	return 0;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		const struct mq_attr __user *, u_mqstat,
		struct mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
		return -EFAULT;
	return 0;
}
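
/*
 * Example (userspace sketch, not part of this file): glibc implements both
 * mq_getattr() and mq_setattr() on top of the mq_getsetattr syscall above.
 * Per the checks in do_mq_getsetattr(), only mq_flags (O_NONBLOCK) is
 * settable; the other fields are read-only:
 *
 *	struct mq_attr cur;
 *	mq_getattr(q, &cur);
 *	cur.mq_flags |= O_NONBLOCK;
 *	mq_setattr(q, &cur, NULL);	(sends/receives now fail with EAGAIN
 *					 instead of blocking)
 */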

#ifdef CONFIG_COMPAT

struct compat_mq_attr {
	compat_long_t mq_flags;      /* message queue flags		      */
	compat_long_t mq_maxmsg;     /* maximum number of messages	      */
	compat_long_t mq_msgsize;    /* maximum message size		      */
	compat_long_t mq_curmsgs;    /* number of messages currently queued  */
	compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
};

static inline int get_compat_mq_attr(struct mq_attr *attr,
			const struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	if (copy_from_user(&v, uattr, sizeof(*uattr)))
		return -EFAULT;

	memset(attr, 0, sizeof(*attr));
	attr->mq_flags = v.mq_flags;
	attr->mq_maxmsg = v.mq_maxmsg;
	attr->mq_msgsize = v.mq_msgsize;
	attr->mq_curmsgs = v.mq_curmsgs;
	return 0;
}

static inline int put_compat_mq_attr(const struct mq_attr *attr,
			struct compat_mq_attr __user *uattr)
{
	struct compat_mq_attr v;

	memset(&v, 0, sizeof(v));
	v.mq_flags = attr->mq_flags;
	v.mq_maxmsg = attr->mq_maxmsg;
	v.mq_msgsize = attr->mq_msgsize;
	v.mq_curmsgs = attr->mq_curmsgs;
	if (copy_to_user(uattr, &v, sizeof(*uattr)))
		return -EFAULT;
	return 0;
}

COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
		       int, oflag, compat_mode_t, mode,
		       struct compat_mq_attr __user *, u_attr)
{
	struct mq_attr attr, *p = NULL;

	if (u_attr && oflag & O_CREAT) {
		p = &attr;
		if (get_compat_mq_attr(&attr, u_attr))
			return -EFAULT;
	}
	return do_mq_open(u_name, oflag, mode, p);
}

static int compat_prepare_timeout(const struct compat_timespec __user *p,
				   struct timespec *ts)
{
	if (compat_get_timespec(ts, p))
		return -EFAULT;
	if (!timespec_valid(ts))
		return -EINVAL;
	return 0;
}

COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
		       const char __user *, u_msg_ptr,
		       compat_size_t, msg_len, unsigned int, msg_prio,
		       const struct compat_timespec __user *, u_abs_timeout)
{
	struct timespec ts, *p = NULL;

	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
}

COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
		       char __user *, u_msg_ptr,
		       compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
		       const struct compat_timespec __user *, u_abs_timeout)
{
	struct timespec ts, *p = NULL;

	if (u_abs_timeout) {
		int res = compat_prepare_timeout(u_abs_timeout, &ts);
		if (res)
			return res;
		p = &ts;
	}
	return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
}

COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
		       const struct compat_sigevent __user *, u_notification)
{
	struct sigevent n, *p = NULL;

	if (u_notification) {
		if (get_compat_sigevent(&n, u_notification))
			return -EFAULT;
		if (n.sigev_notify == SIGEV_THREAD)
			n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
		p = &n;
	}
	return do_mq_notify(mqdes, p);
}

COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
		       const struct compat_mq_attr __user *, u_mqstat,
		       struct compat_mq_attr __user *, u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct mq_attr *new = NULL, *old = NULL;

	if (u_mqstat) {
		new = &mqstat;
		if (get_compat_mq_attr(new, u_mqstat))
			return -EFAULT;
	}
	if (u_omqstat)
		old = &omqstat;

	ret = do_mq_getsetattr(mqdes, new, old);
	if (ret || !old)
		return ret;

	if (put_compat_mq_attr(old, u_omqstat))
		return -EFAULT;
	return 0;
}
#endif

static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup = simple_lookup,
	.create = mqueue_create,
	.unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush = mqueue_flush_file,
	.poll = mqueue_poll_file,
	.read = mqueue_read_file,
	.llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
	.alloc_inode = mqueue_alloc_inode,
	.destroy_inode = mqueue_destroy_inode,
	.evict_inode = mqueue_evict_inode,
	.statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
	.name = "mqueue",
	.mount = mqueue_mount,
	.kill_sb = kill_litter_super,
	.fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
	ns->mq_queues_count  = 0;
	ns->mq_queues_max    = DFLT_QUEUESMAX;
	ns->mq_msg_max       = DFLT_MSGMAX;
	ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
	ns->mq_msg_default   = DFLT_MSG;
	ns->mq_msgsize_default  = DFLT_MSGSIZE;

	ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
	if (IS_ERR(ns->mq_mnt)) {
		int err = PTR_ERR(ns->mq_mnt);
		ns->mq_mnt = NULL;
		return err;
	}
	return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
	ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
	kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = mq_register_sysctl_table();

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	spin_lock_init(&mq_lock);

	error = mq_init_ns(&init_ipc_ns);
	if (error)
		goto out_filesystem;

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

device_initcall(init_mqueue_fs);