/*
 * linux/fs/9p/mux.c
 *
 * Protocol Multiplexer
 *
 * Copyright (C) 2004 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/kthread.h>
#include <linux/idr.h>

#include "debug.h"
#include "v9fs.h"
#include "9p.h"
#include "conv.h"
#include "transport.h"
#include "mux.h"

#define ERREQFLUSH	1
#define SCHED_TIMEOUT	10
#define MAXPOLLWADDR	2

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};

struct v9fs_mux_poll_task;

struct v9fs_req {
	int tag;
	struct v9fs_fcall *tcall;
	struct v9fs_fcall *rcall;
	int err;
	v9fs_mux_req_callback cb;
	void *cba;
	struct list_head req_list;
};

struct v9fs_mux_data {
	spinlock_t lock;
	struct list_head mux_list;
	struct v9fs_mux_poll_task *poll_task;
	int msize;
	unsigned char *extended;
	struct v9fs_transport *trans;
	struct v9fs_idpool tidpool;
	int err;
	wait_queue_head_t equeue;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct v9fs_fcall *rcall;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	wait_queue_t poll_wait[MAXPOLLWADDR];
	wait_queue_head_t *poll_waddr[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};

struct v9fs_mux_poll_task {
	struct task_struct *task;
	struct list_head mux_list;
	int muxnum;
};

struct v9fs_mux_rpc {
	struct v9fs_mux_data *m;
	struct v9fs_req *req;
	int err;
	struct v9fs_fcall *rcall;
	wait_queue_head_t wqueue;
};

extern int v9fs_errstr2errno(char *str, int len);

static int v9fs_poll_proc(void *);
static void v9fs_read_work(void *);
static void v9fs_write_work(void *);
static void v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
			  poll_table * p);
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *);
static void v9fs_mux_put_tag(struct v9fs_mux_data *, u16);

static DECLARE_MUTEX(v9fs_mux_task_lock);
static struct workqueue_struct *v9fs_mux_wq;

static int v9fs_mux_num;
static int v9fs_mux_poll_task_num;
static struct v9fs_mux_poll_task v9fs_mux_poll_tasks[100];

void v9fs_mux_global_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++)
		v9fs_mux_poll_tasks[i].task = NULL;

	v9fs_mux_wq = create_workqueue("v9fs");
}

void v9fs_mux_global_exit(void)
{
	destroy_workqueue(v9fs_mux_wq);
}

/**
 * v9fs_mux_calc_poll_procs - calculates the number of polling procs
 * based on the number of mounted v9fs filesystems.
 *
 * The function returns how many mounts each existing poll task would
 * have to handle, rounded up and capped at the size of the
 * v9fs_mux_poll_tasks array. Growing the task set whenever this value
 * exceeds the current number of tasks keeps the number of poll tasks
 * roughly at the square root of the number of mounts.
 */
inline int v9fs_mux_calc_poll_procs(int muxnum)
{
	int n;

	if (v9fs_mux_poll_task_num)
		n = muxnum / v9fs_mux_poll_task_num +
		    (muxnum % v9fs_mux_poll_task_num ? 1 : 0);
	else
		n = 1;

	if (n > ARRAY_SIZE(v9fs_mux_poll_tasks))
		n = ARRAY_SIZE(v9fs_mux_poll_tasks);

	return n;
}

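/**
 * v9fs_mux_poll_start - assign a mux to one of the polling tasks,
 * creating a new task first when v9fs_mux_calc_poll_procs indicates
 * the existing tasks are oversubscribed.
 */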
static void v9fs_mux_poll_start(struct v9fs_mux_data *m)
{
	int i, n;
	struct v9fs_mux_poll_task *vpt, *vptlast;

	dprintk(DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, v9fs_mux_num,
		v9fs_mux_poll_task_num);

	down(&v9fs_mux_task_lock);

	n = v9fs_mux_calc_poll_procs(v9fs_mux_num + 1);
	if (n > v9fs_mux_poll_task_num) {
		for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
			if (v9fs_mux_poll_tasks[i].task == NULL) {
				vpt = &v9fs_mux_poll_tasks[i];
				dprintk(DEBUG_MUX, "create proc %p\n", vpt);
				vpt->task = kthread_create(v9fs_poll_proc,
							   vpt, "v9fs-poll");
				INIT_LIST_HEAD(&vpt->mux_list);
				vpt->muxnum = 0;
				v9fs_mux_poll_task_num++;
				wake_up_process(vpt->task);
				break;
			}
		}

		if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks))
			dprintk(DEBUG_ERROR, "warning: no free poll slots\n");
	}

	n = (v9fs_mux_num + 1) / v9fs_mux_poll_task_num +
	    ((v9fs_mux_num + 1) % v9fs_mux_poll_task_num ? 1 : 0);

	vptlast = NULL;
	for (i = 0; i < ARRAY_SIZE(v9fs_mux_poll_tasks); i++) {
		vpt = &v9fs_mux_poll_tasks[i];
		if (vpt->task != NULL) {
			vptlast = vpt;
			if (vpt->muxnum < n) {
				dprintk(DEBUG_MUX, "put in proc %d\n", i);
				list_add(&m->mux_list, &vpt->mux_list);
				vpt->muxnum++;
				m->poll_task = vpt;
				memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
				init_poll_funcptr(&m->pt, v9fs_pollwait);
				break;
			}
		}
	}

	if (i >= ARRAY_SIZE(v9fs_mux_poll_tasks)) {
		/* every running task is at the threshold; fall back to
		   the last task that is actually running */
		dprintk(DEBUG_MUX, "put in proc %d\n", i);
		list_add(&m->mux_list, &vptlast->mux_list);
		vptlast->muxnum++;
		m->poll_task = vptlast;
		memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
		init_poll_funcptr(&m->pt, v9fs_pollwait);
	}

	v9fs_mux_num++;
	up(&v9fs_mux_task_lock);
}

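/**
 * v9fs_mux_poll_stop - detach a mux from its polling task and from any
 * wait queues it was polling; the task itself is killed once it no
 * longer serves any muxes.
 */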
static void v9fs_mux_poll_stop(struct v9fs_mux_data *m)
{
	int i;
	struct v9fs_mux_poll_task *vpt;

	down(&v9fs_mux_task_lock);
	vpt = m->poll_task;
	list_del(&m->mux_list);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (m->poll_waddr[i] != NULL) {
			remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]);
			m->poll_waddr[i] = NULL;
		}
	}
	vpt->muxnum--;
	if (!vpt->muxnum) {
		dprintk(DEBUG_MUX, "destroy proc %p\n", vpt);
		send_sig(SIGKILL, vpt->task, 1);
		vpt->task = NULL;
		v9fs_mux_poll_task_num--;
	}
	v9fs_mux_num--;
	up(&v9fs_mux_task_lock);
}

/**
 * v9fs_mux_init - allocate and initialize the per-session mux data.
 * Creates the polling task if this is the first session.
 *
 * @trans: transport structure
 * @msize: maximum message size
 * @extended: pointer to the extended flag
 */
struct v9fs_mux_data *v9fs_mux_init(struct v9fs_transport *trans, int msize,
				    unsigned char *extended)
{
	int i, n;
	struct v9fs_mux_data *m, *mtmp;

	dprintk(DEBUG_MUX, "transport %p msize %d\n", trans, msize);
	m = kmalloc(sizeof(struct v9fs_mux_data), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&m->lock);
	INIT_LIST_HEAD(&m->mux_list);
	m->msize = msize;
	m->extended = extended;
	m->trans = trans;
	idr_init(&m->tidpool.pool);
	init_MUTEX(&m->tidpool.lock);
	m->err = 0;
	init_waitqueue_head(&m->equeue);
	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	m->rcall = NULL;
	m->rpos = 0;
	m->rbuf = NULL;
	m->wpos = m->wsize = 0;
	m->wbuf = NULL;
	INIT_WORK(&m->rq, v9fs_read_work, m);
	INIT_WORK(&m->wq, v9fs_write_work, m);
	m->wsched = 0;
	memset(&m->poll_waddr, 0, sizeof(m->poll_waddr));
	v9fs_mux_poll_start(m);

	n = trans->poll(trans, &m->pt);
	if (n & POLLIN) {
		dprintk(DEBUG_MUX, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		dprintk(DEBUG_MUX, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) {
		if (IS_ERR(m->poll_waddr[i])) {
			v9fs_mux_poll_stop(m);
			mtmp = (void *)m->poll_waddr[i];	/* the error code */
			kfree(m);
			m = mtmp;
			break;
		}
	}

	return m;
}

/**
 * v9fs_mux_destroy - cancels all pending requests and frees mux resources
 */
void v9fs_mux_destroy(struct v9fs_mux_data *m)
{
	dprintk(DEBUG_MUX, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);
	v9fs_mux_cancel(m, -ECONNRESET);

	if (!list_empty(&m->req_list)) {
		/* wait until all processes waiting on this session exit */
		dprintk(DEBUG_MUX, "mux %p waiting for empty request queue\n",
			m);
		wait_event_timeout(m->equeue, (list_empty(&m->req_list)), 5000);
		dprintk(DEBUG_MUX, "mux %p request queue empty: %d\n", m,
			list_empty(&m->req_list));
	}

	v9fs_mux_poll_stop(m);
	m->trans = NULL;

	kfree(m);
}

/**
 * v9fs_pollwait - called by a file's poll operation to add the v9fs-poll
 * task to the file's wait queue
 */
static void
v9fs_pollwait(struct file *filp, wait_queue_head_t * wait_address,
	      poll_table * p)
{
	int i;
	struct v9fs_mux_data *m;

	m = container_of(p, struct v9fs_mux_data, pt);
	for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++)
		if (m->poll_waddr[i] == NULL)
			break;

	if (i >= ARRAY_SIZE(m->poll_waddr)) {
		dprintk(DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	m->poll_waddr[i] = wait_address;

	if (!wait_address) {
		dprintk(DEBUG_ERROR, "no wait_address\n");
		m->poll_waddr[i] = ERR_PTR(-EIO);
		return;
	}

	init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task);
	add_wait_queue(wait_address, &m->poll_wait[i]);
}

/**
 * v9fs_poll_mux - polls a mux and schedules read or write works if necessary
 */
static inline void v9fs_poll_mux(struct v9fs_mux_data *m)
{
	int n;

	if (m->err < 0)
		return;

	n = m->trans->poll(m->trans, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		dprintk(DEBUG_MUX, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		v9fs_mux_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		dprintk(DEBUG_MUX, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		dprintk(DEBUG_MUX, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list))
		    && !test_and_set_bit(Wworksched, &m->wsched)) {
			dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->wq);
		}
	}
}

/**
 * v9fs_poll_proc - polls all v9fs transports for new events and queues
 * the appropriate work to the work queue
 */
static int v9fs_poll_proc(void *a)
{
	struct v9fs_mux_data *m, *mtmp;
	struct v9fs_mux_poll_task *vpt;

	vpt = a;
	dprintk(DEBUG_MUX, "start %p %p\n", current, vpt);
	allow_signal(SIGKILL);
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) {
			v9fs_poll_mux(m);
		}

		dprintk(DEBUG_MUX, "sleeping...\n");
		schedule_timeout(SCHED_TIMEOUT * HZ);
	}

	__set_current_state(TASK_RUNNING);
	dprintk(DEBUG_MUX, "finish\n");
	return 0;
}

/**
 * v9fs_write_work - called when a transport can send some data
 */
static void v9fs_write_work(void *a)
{
	int n, err;
	struct v9fs_mux_data *m;
	struct v9fs_req *req;

	m = a;

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->lock);
		req = list_entry(m->unsent_req_list.next, struct v9fs_req,
				 req_list);
		list_move_tail(&req->req_list, &m->req_list);
		m->wbuf = req->tcall->sdata;
		m->wsize = req->tcall->size;
		m->wpos = 0;
		dump_data(m->wbuf, m->wsize);
		spin_unlock(&m->lock);
	}

	dprintk(DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = m->trans->write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos);
	dprintk(DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLOUT) {
			dprintk(DEBUG_MUX, "schedule write work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	v9fs_mux_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

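/**
 * process_request - maps an Rerror reply (or a mismatched reply type) to
 * a local errno in req->err, then completes the request: the callback is
 * invoked unless the request was flushed, the tag is released, and
 * waiters on the session are woken.
 */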
static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
	int ecode, tag;
	struct v9fs_str *ename;

	tag = req->tag;
	if (req->rcall->id == RERROR && !req->err) {
		ecode = req->rcall->params.rerror.errno;
		ename = &req->rcall->params.rerror.error;

		dprintk(DEBUG_MUX, "Rerror %.*s\n", ename->len, ename->str);

		if (*m->extended)
			req->err = -ecode;

		if (!req->err) {
			req->err = v9fs_errstr2errno(ename->str, ename->len);

			if (!req->err) {	/* string match failed */
				PRINT_FCALL_ERROR("unknown error", req->rcall);
			}

			if (!req->err)
				req->err = -ESERVERFAULT;
		}
	} else if (req->tcall && req->rcall->id != req->tcall->id + 1) {
		dprintk(DEBUG_ERROR, "fcall mismatch: expected %d, got %d\n",
			req->tcall->id + 1, req->rcall->id);
		if (!req->err)
			req->err = -EIO;
	}

	if (req->cb && req->err != ERREQFLUSH) {
		dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n",
			req->tcall, req->rcall);

		(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
		req->cb = NULL;
	} else
		kfree(req->rcall);

	v9fs_mux_put_tag(m, tag);

	wake_up(&m->equeue);
	kfree(req);
}

/**
 * v9fs_read_work - called when there is some data to be read from a transport
 */
static void v9fs_read_work(void *a)
{
	int n, err;
	struct v9fs_mux_data *m;
	struct v9fs_req *req, *rptr, *rreq;
	struct v9fs_fcall *rcall;
	char *rbuf;

	m = a;

	if (m->err < 0)
		return;

	rcall = NULL;
	dprintk(DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct v9fs_fcall) + m->msize, GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
		}

		m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
		m->rpos = 0;
	}

	clear_bit(Rpending, &m->wsched);
	err = m->trans->read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos);
	dprintk(DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;
	while (m->rpos > 4) {
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->msize) {
			dprintk(DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		if (m->rpos < n)
			break;

		dump_data(m->rbuf, n);
		err =
		    v9fs_deserialize_fcall(m->rbuf, n, m->rcall, *m->extended);
		if (err < 0) {
			goto error;
		}

		rcall = m->rcall;
		rbuf = m->rbuf;
		if (m->rpos > n) {
			m->rcall = kmalloc(sizeof(struct v9fs_fcall) + m->msize,
					   GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}

			m->rbuf = (char *)m->rcall + sizeof(struct v9fs_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		dprintk(DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, rcall->id,
			rcall->tag);

		req = NULL;
		spin_lock(&m->lock);
		list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) {
			if (rreq->tag == rcall->tag) {
				req = rreq;
				req->rcall = rcall;
				list_del(&req->req_list);
				spin_unlock(&m->lock);
				process_request(m, req);
				break;
			}
		}

		if (!req) {
			spin_unlock(&m->lock);
			if (err >= 0 && rcall->id != RFLUSH)
				dprintk(DEBUG_ERROR,
					"unexpected response mux %p id %d tag %d\n",
					m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = m->trans->poll(m->trans, NULL);

		if (n & POLLIN) {
			dprintk(DEBUG_MUX, "schedule read work mux %p\n", m);
			queue_work(v9fs_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

error:
	v9fs_mux_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * v9fs_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. A return from the function does not
 * guarantee that the request was sent successfully; on failure an
 * ERR_PTR value is returned that can be decoded with the PTR_ERR macro.
 *
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to call when response is received
 * @cba: parameter to pass to the callback function
 */
static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m,
					  struct v9fs_fcall *tc,
					  v9fs_mux_req_callback cb, void *cba)
{
	int n;
	struct v9fs_req *req;

	dprintk(DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

	req = kmalloc(sizeof(struct v9fs_req), GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	if (tc->id == TVERSION)
		n = V9FS_NOTAG;
	else
		n = v9fs_mux_get_tag(m);

	if (n < 0) {
		kfree(req);	/* don't leak the request on tag failure */
		return ERR_PTR(-ENOMEM);
	}

	v9fs_set_tag(tc, n);

	req->tag = n;
	req->tcall = tc;
	req->rcall = NULL;
	req->err = 0;
	req->cb = cb;
	req->cba = cba;

	spin_lock(&m->lock);
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&m->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = m->trans->poll(m->trans, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(v9fs_mux_wq, &m->wq);

	return req;
}

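/**
 * v9fs_mux_flush_cb - called when the Rflush reply arrives; removes the
 * flushed request from the queue, runs its callback if one is still set,
 * and frees the request together with both flush fcalls.
 */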
static inline void
v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc,
		  int err)
{
	v9fs_mux_req_callback cb;
	int tag;
	struct v9fs_mux_data *m;
	struct v9fs_req *req, *rptr;

	m = a;
	dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc,
		rc, err, tc->params.tflush.oldtag);

	spin_lock(&m->lock);
	cb = NULL;
	tag = tc->params.tflush.oldtag;
	list_for_each_entry_safe(req, rptr, &m->req_list, req_list) {
		if (req->tag == tag) {
			list_del(&req->req_list);
			if (req->cb) {
				cb = req->cb;
				req->cb = NULL;
				spin_unlock(&m->lock);
				(*cb) (req->cba, req->tcall, req->rcall,
				       req->err);
			}
			kfree(req);
			wake_up(&m->equeue);
			break;
		}
	}

	if (!cb)
		spin_unlock(&m->lock);

	v9fs_mux_put_tag(m, tag);
	kfree(tc);
	kfree(rc);
}

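/**
 * v9fs_mux_flush_request - sends a Tflush for a request that is being
 * abandoned (e.g. because the waiting process was interrupted).
 */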
static void
v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req)
{
	struct v9fs_fcall *fc;

	dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	fc = v9fs_create_tflush(req->tag);
	v9fs_send_request(m, fc, v9fs_mux_flush_cb, m);
}

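/**
 * v9fs_mux_rpc_cb - completion callback used by v9fs_mux_rpc; stores the
 * reply and error code and wakes the sleeping caller. Flushed requests
 * (err == ERREQFLUSH) are ignored.
 */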
static void
v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err)
{
	struct v9fs_mux_rpc *r;

	if (err == ERREQFLUSH) {
		dprintk(DEBUG_MUX, "err req flush\n");
		return;
	}

	r = a;
	dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req,
		tc, rc, err);
	r->rcall = rc;
	r->err = err;
	wake_up(&r->wqueue);
}

/**
 * v9fs_mux_rpc - sends 9P request and waits until a response is available.
 * The function can be interrupted.
 * @m: mux data
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
 */
int
v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
	     struct v9fs_fcall **rc)
{
	int err;
	unsigned long flags;
	struct v9fs_req *req;
	struct v9fs_mux_rpc r;

	r.err = 0;
	r.rcall = NULL;
	r.m = m;
	init_waitqueue_head(&r.wqueue);

	if (rc)
		*rc = NULL;

	req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		dprintk(DEBUG_MUX, "error %d\n", err);
		return err;
	}

	r.req = req;
	dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc,
		req->tag, &r, req);
	err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0);
	if (r.err < 0)
		err = r.err;

	if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) {
		spin_lock(&m->lock);
		req->tcall = NULL;
		req->err = ERREQFLUSH;
		spin_unlock(&m->lock);

		clear_thread_flag(TIF_SIGPENDING);
		v9fs_mux_flush_request(m, req);
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (!err) {
		if (r.rcall)
			dprintk(DEBUG_MUX, "got response id %d tag %d\n",
				r.rcall->id, r.rcall->tag);

		if (rc)
			*rc = r.rcall;
		else
			kfree(r.rcall);
	} else {
		kfree(r.rcall);
		dprintk(DEBUG_MUX, "got error %d\n", err);
		if (err > 0)
			err = -EIO;
	}

	return err;
}

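/*
 * Illustrative caller pattern for v9fs_mux_rpc -- a sketch only, assuming
 * the tcall is built by one of the v9fs_create_t* helpers in conv.c
 * (v9fs_create_tversion here); the caller frees the tcall and owns the
 * returned rcall:
 *
 *	struct v9fs_fcall *tc, *rc;
 *	int err;
 *
 *	tc = v9fs_create_tversion(msize, "9P2000");
 *	if (!IS_ERR(tc)) {
 *		err = v9fs_mux_rpc(m, tc, &rc);
 *		kfree(tc);
 *		if (!err)
 *			kfree(rc);	(caller owns the response)
 *	}
 */
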
/**
 * v9fs_mux_rpcnb - sends 9P request without waiting for response.
 * @m: mux data
 * @tc: request to be sent
 * @cb: callback function to be called when response arrives
 * @a: value to pass to the callback function
 */
int v9fs_mux_rpcnb(struct v9fs_mux_data *m, struct v9fs_fcall *tc,
		   v9fs_mux_req_callback cb, void *a)
{
	int err;
	struct v9fs_req *req;

	req = v9fs_send_request(m, tc, cb, a);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		dprintk(DEBUG_MUX, "error %d\n", err);
		return err;
	}

	dprintk(DEBUG_MUX, "mux %p tc %p tag %d\n", m, tc, req->tag);
	return 0;
}

/**
 * v9fs_mux_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 */
void v9fs_mux_cancel(struct v9fs_mux_data *m, int err)
{
	struct v9fs_req *req, *rtmp;
	LIST_HEAD(cancel_list);

	dprintk(DEBUG_MUX, "mux %p err %d\n", m, err);
	m->err = err;
	spin_lock(&m->lock);
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock(&m->lock);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		list_del(&req->req_list);
		if (!req->err)
			req->err = err;

		if (req->cb)
			(*req->cb) (req->cba, req->tcall, req->rcall, req->err);
		else
			kfree(req->rcall);

		kfree(req);
	}

	wake_up(&m->equeue);
}

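/**
 * v9fs_mux_get_tag - allocate a request tag from the session's id pool,
 * falling back to V9FS_NOTAG if the pool is exhausted.
 */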
static u16 v9fs_mux_get_tag(struct v9fs_mux_data *m)
{
	int tag;

	tag = v9fs_get_idpool(&m->tidpool);
	if (tag < 0)
		return V9FS_NOTAG;
	else
		return (u16) tag;
}

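/**
 * v9fs_mux_put_tag - return a tag to the id pool; V9FS_NOTAG is never
 * pooled and is ignored.
 */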
static void v9fs_mux_put_tag(struct v9fs_mux_data *m, u16 tag)
{
	if (tag != V9FS_NOTAG && v9fs_check_idpool(tag, &m->tidpool))
		v9fs_put_idpool(tag, &m->tidpool);
}