xenbus_dev_frontend.c

/*
 * Driver giving user-space access to the kernel's xenbus connection
 * to xenstore.
 *
 * Copyright (c) 2005, Christian Limpach
 * Copyright (c) 2005, Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Changes:
 * 2008-10-07  Alex Zeffertt  Replaced /proc/xen/xenbus with xenfs filesystem
 *                            and /proc/xen compatibility mount point.
 *                            Turned xenfs into a loadable module.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>

#include <xen/xenbus.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#include "xenbus.h"

/*
 * An element of a list of outstanding transactions, for which we're
 * still awaiting a reply.
 */
struct xenbus_transaction_holder {
	struct list_head list;
	struct xenbus_transaction handle;
};

/*
 * A buffer of data on the queue.
 */
struct read_buffer {
	struct list_head list;
	unsigned int cons;
	unsigned int len;
	char msg[];
};

struct xenbus_file_priv {
	/*
	 * msgbuffer_mutex is held while partial requests are built up
	 * and complete requests are acted on.  It therefore protects
	 * the "transactions" and "watches" lists, and the partial
	 * request length and buffer.
	 *
	 * reply_mutex protects the reply being built up to return to
	 * usermode.  It nests inside msgbuffer_mutex but may be held
	 * alone during a watch callback.
	 */
	struct mutex msgbuffer_mutex;

	/* In-progress transactions */
	struct list_head transactions;

	/* Active watches. */
	struct list_head watches;

	/* Partial request. */
	unsigned int len;
	union {
		struct xsd_sockmsg msg;
		char buffer[XENSTORE_PAYLOAD_MAX];
	} u;

	/* Response queue. */
	struct mutex reply_mutex;
	struct list_head read_buffers;
	wait_queue_head_t read_waitq;
};
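
/*
 * Wire format note: every request written to this device and every
 * reply read back is a struct xsd_sockmsg header (type, req_id, tx_id
 * and len, all uint32_t, from xen/interface/io/xs_wire.h) followed by
 * exactly hdr.len bytes of payload.  The union above lets a partial
 * request be parsed as a header in place once enough bytes have
 * accumulated.
 */
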
/* Read out any raw xenbus messages queued up. */
static ssize_t xenbus_file_read(struct file *filp,
				char __user *ubuf,
				size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct read_buffer *rb;
	ssize_t i;	/* signed, so a stored -EFAULT survives the return */
	int ret;

	mutex_lock(&u->reply_mutex);
again:
	while (list_empty(&u->read_buffers)) {
		mutex_unlock(&u->reply_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(u->read_waitq,
					       !list_empty(&u->read_buffers));
		if (ret)
			return ret;

		mutex_lock(&u->reply_mutex);
	}

	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
	i = 0;
	while (i < len) {
		unsigned int sz = min_t(unsigned int, len - i,
					rb->len - rb->cons);

		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);

		i += sz - ret;
		rb->cons += sz - ret;

		if (ret != 0) {
			if (i == 0)
				i = -EFAULT;
			goto out;
		}

		/* Clear out buffer if it has been consumed. */
		if (rb->cons == rb->len) {
			list_del(&rb->list);
			kfree(rb);
			if (list_empty(&u->read_buffers))
				break;
			rb = list_entry(u->read_buffers.next,
					struct read_buffer, list);
		}
	}
	if (i == 0)
		goto again;

out:
	mutex_unlock(&u->reply_mutex);
	return i;
}

/*
 * Add a buffer to the queue.  Caller must hold the appropriate lock
 * if the queue is not local.  (Commonly the caller will build up
 * multiple queued buffers on a temporary local list, and then add it
 * to the appropriate list under lock once all the buffers have been
 * successfully allocated.)
 */
static int queue_reply(struct list_head *queue, const void *data, size_t len)
{
	struct read_buffer *rb;

	if (len == 0)
		return 0;
	if (len > XENSTORE_PAYLOAD_MAX)
		return -EINVAL;

	rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
	if (rb == NULL)
		return -ENOMEM;

	rb->cons = 0;
	rb->len = len;

	memcpy(rb->msg, data, len);

	list_add_tail(&rb->list, queue);
	return 0;
}
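
/*
 * A minimal sketch of the staging pattern described above, assuming
 * the caller owns `priv` (see watch_fired() below for a real user):
 *
 *	LIST_HEAD(staging_q);
 *	int err = queue_reply(&staging_q, &hdr, sizeof(hdr));
 *	if (!err)
 *		err = queue_reply(&staging_q, body, body_len);
 *	mutex_lock(&priv->reply_mutex);
 *	if (!err) {
 *		list_splice_tail(&staging_q, &priv->read_buffers);
 *		wake_up(&priv->read_waitq);
 *	} else
 *		queue_cleanup(&staging_q);
 *	mutex_unlock(&priv->reply_mutex);
 */
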
/*
 * Free all the read_buffers on a list.
 * Caller must have sole reference to list.
 */
static void queue_cleanup(struct list_head *list)
{
	struct read_buffer *rb;

	while (!list_empty(list)) {
		rb = list_entry(list->next, struct read_buffer, list);
		list_del(list->next);
		kfree(rb);
	}
}

struct watch_adapter {
	struct list_head list;
	struct xenbus_watch watch;
	struct xenbus_file_priv *dev_data;
	char *token;
};

static void free_watch_adapter(struct watch_adapter *watch)
{
	kfree(watch->watch.node);
	kfree(watch->token);
	kfree(watch);
}

static struct watch_adapter *alloc_watch_adapter(const char *path,
						 const char *token)
{
	struct watch_adapter *watch;

	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	if (watch == NULL)
		goto out_fail;

	watch->watch.node = kstrdup(path, GFP_KERNEL);
	if (watch->watch.node == NULL)
		goto out_free;

	watch->token = kstrdup(token, GFP_KERNEL);
	if (watch->token == NULL)
		goto out_free;

	return watch;

out_free:
	free_watch_adapter(watch);
out_fail:
	return NULL;
}

static void watch_fired(struct xenbus_watch *watch,
			const char *path,
			const char *token)
{
	struct watch_adapter *adap;
	struct xsd_sockmsg hdr;
	const char *token_caller;
	int path_len, tok_len, body_len;
	int ret;
	LIST_HEAD(staging_q);

	adap = container_of(watch, struct watch_adapter, watch);

	token_caller = adap->token;

	path_len = strlen(path) + 1;
	tok_len = strlen(token_caller) + 1;
	body_len = path_len + tok_len;

	hdr.type = XS_WATCH_EVENT;
	hdr.len = body_len;

	mutex_lock(&adap->dev_data->reply_mutex);

	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
	if (!ret)
		ret = queue_reply(&staging_q, path, path_len);
	if (!ret)
		ret = queue_reply(&staging_q, token_caller, tok_len);

	if (!ret) {
		/* Success: pass reply list onto watcher. */
		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
		wake_up(&adap->dev_data->read_waitq);
	} else
		queue_cleanup(&staging_q);

	mutex_unlock(&adap->dev_data->reply_mutex);
}
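
/*
 * So, for example, a watch registered with token "tok" firing on
 * "device/vbd/0" queues an XS_WATCH_EVENT header with len = 17,
 * followed by the body bytes "device/vbd/0", NUL, "tok", NUL --
 * exactly what userspace then reads back from the device.
 */
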
static int xenbus_command_reply(struct xenbus_file_priv *u,
				unsigned int msg_type, const char *reply)
{
	struct {
		struct xsd_sockmsg hdr;
		char body[16];
	} msg;
	int rc;

	msg.hdr = u->u.msg;
	msg.hdr.type = msg_type;
	msg.hdr.len = strlen(reply) + 1;
	if (msg.hdr.len > sizeof(msg.body))
		return -E2BIG;
	/* Copy the reply text in; without this the queued body is garbage. */
	memcpy(msg.body, reply, msg.hdr.len);

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
	wake_up(&u->read_waitq);
	mutex_unlock(&u->reply_mutex);

	return rc;
}

static int xenbus_write_transaction(unsigned msg_type,
				    struct xenbus_file_priv *u)
{
	int rc;
	void *reply;
	struct xenbus_transaction_holder *trans = NULL;
	LIST_HEAD(staging_q);

	if (msg_type == XS_TRANSACTION_START) {
		trans = kmalloc(sizeof(*trans), GFP_KERNEL);
		if (!trans) {
			rc = -ENOMEM;
			goto out;
		}
	} else if (u->u.msg.tx_id != 0) {
		list_for_each_entry(trans, &u->transactions, list)
			if (trans->handle.id == u->u.msg.tx_id)
				break;
		if (&trans->list == &u->transactions)
			return xenbus_command_reply(u, XS_ERROR, "ENOENT");
	}

	reply = xenbus_dev_request_and_reply(&u->u.msg);
	if (IS_ERR(reply)) {
		if (msg_type == XS_TRANSACTION_START)
			kfree(trans);
		rc = PTR_ERR(reply);
		goto out;
	}

	if (msg_type == XS_TRANSACTION_START) {
		if (u->u.msg.type == XS_ERROR)
			kfree(trans);
		else {
			trans->handle.id = simple_strtoul(reply, NULL, 0);
			list_add(&trans->list, &u->transactions);
		}
	} else if (u->u.msg.type == XS_TRANSACTION_END) {
		list_del(&trans->list);
		kfree(trans);
	}

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&staging_q, &u->u.msg, sizeof(u->u.msg));
	if (!rc)
		rc = queue_reply(&staging_q, reply, u->u.msg.len);
	if (!rc) {
		list_splice_tail(&staging_q, &u->read_buffers);
		wake_up(&u->read_waitq);
	} else {
		queue_cleanup(&staging_q);
	}
	mutex_unlock(&u->reply_mutex);

	kfree(reply);

out:
	return rc;
}
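
/*
 * Transaction lifecycle, as handled above: XS_TRANSACTION_START replies
 * with the new transaction id as a decimal string, which is parsed with
 * simple_strtoul() and remembered in a xenbus_transaction_holder; later
 * messages carrying a non-zero tx_id are validated against that list,
 * and XS_TRANSACTION_END (or release of the file) drops the holder.
 */
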
static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
	struct watch_adapter *watch;
	char *path, *token;
	int err, rc;
	LIST_HEAD(staging_q);

	path = u->u.buffer + sizeof(u->u.msg);
	token = memchr(path, 0, u->u.msg.len);
	if (token == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}
	token++;
	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}

	if (msg_type == XS_WATCH) {
		watch = alloc_watch_adapter(path, token);
		if (watch == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		watch->watch.callback = watch_fired;
		watch->dev_data = u;

		err = register_xenbus_watch(&watch->watch);
		if (err) {
			free_watch_adapter(watch);
			rc = err;
			goto out;
		}
		list_add(&watch->list, &u->watches);
	} else {
		list_for_each_entry(watch, &u->watches, list) {
			if (!strcmp(watch->token, token) &&
			    !strcmp(watch->watch.node, path)) {
				unregister_xenbus_watch(&watch->watch);
				list_del(&watch->list);
				free_watch_adapter(watch);
				break;
			}
		}
	}

	/* Success.  Synthesize a reply to say all is OK. */
	rc = xenbus_command_reply(u, msg_type, "OK");

out:
	return rc;
}
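
/*
 * An XS_WATCH/XS_UNWATCH payload is two NUL-terminated strings back to
 * back, which is what the two memchr() checks above enforce.  For
 * example (illustrative path), watching "device/vbd" with token "0"
 * means a body of "device/vbd", NUL, "0", NUL and hdr.len = 13.
 */
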
static ssize_t xenbus_file_write(struct file *filp,
				 const char __user *ubuf,
				 size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	uint32_t msg_type;
	int rc = len;
	int ret;
	LIST_HEAD(staging_q);

	/*
	 * We're expecting usermode to be writing properly formed
	 * xenbus messages.  If they write an incomplete message we
	 * buffer it up.  Once it is complete, we act on it.
	 */

	/*
	 * Make sure concurrent writers can't stomp all over each
	 * other's messages and make a mess of our partial message
	 * buffer.  We don't make any attempt to stop multiple
	 * writers from making a mess of each other's incomplete
	 * messages; we're just trying to guarantee our own internal
	 * consistency and make sure that single writes are handled
	 * atomically.
	 */
	mutex_lock(&u->msgbuffer_mutex);

	/* Get this out of the way early to avoid confusion. */
	if (len == 0)
		goto out;

	/* Can't write a xenbus message larger than we can buffer. */
	if (len > sizeof(u->u.buffer) - u->len) {
		/* On error, dump existing buffer. */
		u->len = 0;
		rc = -EINVAL;
		goto out;
	}

	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);
	if (ret != 0) {
		rc = -EFAULT;
		goto out;
	}

	/* Deal with a partial copy. */
	len -= ret;
	rc = len;

	u->len += len;

	/* Return if we haven't got a full message yet. */
	if (u->len < sizeof(u->u.msg))
		goto out; /* not even the header yet */

	/* If we're expecting a message that's larger than we can
	   possibly send, dump what we have and return an error. */
	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
		rc = -E2BIG;
		u->len = 0;
		goto out;
	}

	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
		goto out; /* incomplete data portion */

	/*
	 * OK, now we have a complete message.  Do something with it.
	 */
	msg_type = u->u.msg.type;

	switch (msg_type) {
	case XS_WATCH:
	case XS_UNWATCH:
		/* (Un)Ask for some path to be watched for changes. */
		ret = xenbus_write_watch(msg_type, u);
		break;

	default:
		/* Send out a transaction. */
		ret = xenbus_write_transaction(msg_type, u);
		break;
	}
	if (ret != 0)
		rc = ret;

	/* Buffered message consumed. */
	u->len = 0;

out:
	mutex_unlock(&u->msgbuffer_mutex);
	return rc;
}
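
/*
 * A minimal, hypothetical userspace sketch (fd is an open descriptor on
 * the device; XS_READ and struct xsd_sockmsg come from the xenstore
 * headers).  Because partial messages are buffered across write()s, the
 * header and payload may arrive separately:
 *
 *	struct xsd_sockmsg hdr = {
 *		.type = XS_READ, .req_id = 7, .tx_id = 0, .len = 5,
 *	};
 *	write(fd, &hdr, sizeof(hdr));	// header only: buffered
 *	write(fd, "name", 5);		// 5 bytes incl. the NUL: acted on
 *	read(fd, buf, sizeof(buf));	// reply: header + value
 */
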
static int xenbus_file_open(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u;

	if (xen_store_evtchn == 0)
		return -ENOENT;

	nonseekable_open(inode, filp);

	filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&u->transactions);
	INIT_LIST_HEAD(&u->watches);
	INIT_LIST_HEAD(&u->read_buffers);
	init_waitqueue_head(&u->read_waitq);

	mutex_init(&u->reply_mutex);
	mutex_init(&u->msgbuffer_mutex);

	filp->private_data = u;

	return 0;
}

static int xenbus_file_release(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct xenbus_transaction_holder *trans, *tmp;
	struct watch_adapter *watch, *tmp_watch;
	struct read_buffer *rb, *tmp_rb;

	/*
	 * No need for locking here because there are no other users,
	 * by definition.
	 */

	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
		xenbus_transaction_end(trans->handle, 1);
		list_del(&trans->list);
		kfree(trans);
	}

	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
		unregister_xenbus_watch(&watch->watch);
		list_del(&watch->list);
		free_watch_adapter(watch);
	}

	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
		list_del(&rb->list);
		kfree(rb);
	}
	kfree(u);

	return 0;
}

static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
{
	struct xenbus_file_priv *u = file->private_data;

	poll_wait(file, &u->read_waitq, wait);
	if (!list_empty(&u->read_buffers))
		return POLLIN | POLLRDNORM;
	return 0;
}

const struct file_operations xen_xenbus_fops = {
	.read = xenbus_file_read,
	.write = xenbus_file_write,
	.open = xenbus_file_open,
	.release = xenbus_file_release,
	.poll = xenbus_file_poll,
	.llseek = no_llseek,
};
EXPORT_SYMBOL_GPL(xen_xenbus_fops);

static struct miscdevice xenbus_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/xenbus",
	.fops = &xen_xenbus_fops,
};

static int __init xenbus_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&xenbus_dev);
	if (err)
		pr_err("Could not register xenbus frontend device\n");
	return err;
}
device_initcall(xenbus_init);