/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private data is set
	 * once during mount and is valid until the file is released.
	 */
	return ACCESS_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}
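
/*
 * A freshly initialized request starts with a reference count of one
 * (dropped by fuse_put_request()) and with FR_PENDING set, meaning it
 * has not yet been read by the userspace daemon.
 */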
static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = current->pid;
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;

	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
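
/*
 * Illustrative usage (a sketch, not taken from a real caller): a
 * typical synchronous request cycle elsewhere in the filesystem pairs
 * fuse_get_req() with fuse_put_request():
 *
 *	struct fuse_req *req = fuse_get_req(fc, 0);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	// ... fill in req->in.h and the argument vectors ...
 *	fuse_request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */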
struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}
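
/*
 * Background throttling in a nutshell (descriptive note; the numbers
 * are the historical defaults from inode.c and may differ per mount):
 * at most fc->max_background requests (default 12) are in flight at
 * once, the rest sit on fc->bg_queue.  When fc->num_background reaches
 * fc->congestion_threshold (default 9) the bdi is marked congested so
 * writeback backs off, and when it reaches max_background new
 * allocations block in __fuse_get_req() until request_end() clears
 * fc->blocked.
 */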
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		return;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}
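
/*
 * Wait for a request to complete, in up to three phases: first an
 * interruptible wait, where any signal queues a FUSE_INTERRUPT for the
 * daemon; then a killable wait, where a fatal signal may still dequeue
 * a request that userspace has not yet read; and finally, once the
 * request is in userspace (or was forced), an uninterruptible wait for
 * the reply.
 */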
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}
static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
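
/*
 * Illustrative caller (a sketch only; "inarg" and "outarg" are
 * hypothetical opcode-specific structs, and the field layout follows
 * how fuse_args is consumed above):
 *
 *	FUSE_ARGS(args);
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = get_node_id(inode);
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	args.out.numargs = 1;
 *	args.out.args[0].size = sizeof(outarg);
 *	args.out.args[0].value = &outarg;
 *	err = fuse_simple_request(fc, &args);
 */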
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);

	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};
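
/*
 * A fuse_copy_state tracks one ongoing transfer between a request and
 * userspace.  It operates in one of three modes, depending on which
 * fields are set: copying through an iov_iter (iter), splicing out to
 * a pipe (pipebufs, cs->write == 1, where request pages are referenced
 * rather than copied), or splicing in from a pipe (pipebufs,
 * cs->write == 0, where move_pages additionally allows stealing whole
 * pipe pages via fuse_try_move_page() instead of copying them).
 */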
static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}
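
/*
 * Return convention of fuse_try_move_page(): 0 means the page was
 * moved (the request's page pointer now refers to the stolen pipe
 * page), a negative value is an error, and 1 means stealing was not
 * possible and the caller should fall back to an ordinary copy from
 * cs->pg.
 */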
static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;

	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}
static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}

		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}
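
/*
 * Scheduling of forgets versus normal requests: when both are queued,
 * fuse_dev_do_read() below serves up to 16 forgets in a row
 * (fiq->forget_batch counts down from 16), then lets roughly 8 normal
 * requests through (the counter runs down to -8) before resetting, so
 * that a flood of forgets cannot starve regular traffic, and vice
 * versa.
 */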
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}
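
/*
 * Wire format delivered by the read above: each request starts with a
 * fixed header followed by the opcode-specific argument structs copied
 * back-to-back, with in.h.len giving the total size.  For reference,
 * the header layout from <uapi/linux/fuse.h>:
 *
 *	struct fuse_in_header {
 *		uint32_t len;     // total request size, header included
 *		uint32_t opcode;  // e.g. FUSE_LOOKUP, FUSE_SETXATTR
 *		uint64_t unique;  // echoed back in the daemon's reply
 *		uint64_t nodeid;
 *		uint32_t uid, gid, pid;
 *		uint32_t padding;
 *	};
 */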
static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		buf->ops = &nosteal_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->files)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}
static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}
static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}
  1417. static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
  1418. struct fuse_notify_retrieve_out *outarg)
  1419. {
  1420. int err;
  1421. struct address_space *mapping = inode->i_mapping;
  1422. struct fuse_req *req;
  1423. pgoff_t index;
  1424. loff_t file_size;
  1425. unsigned int num;
  1426. unsigned int offset;
  1427. size_t total_len = 0;
  1428. int num_pages;
  1429. offset = outarg->offset & ~PAGE_MASK;
  1430. file_size = i_size_read(inode);
  1431. num = outarg->size;
  1432. if (outarg->offset > file_size)
  1433. num = 0;
  1434. else if (outarg->offset + num > file_size)
  1435. num = file_size - outarg->offset;
  1436. num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
  1437. num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
  1438. req = fuse_get_req(fc, num_pages);
  1439. if (IS_ERR(req))
  1440. return PTR_ERR(req);
  1441. req->in.h.opcode = FUSE_NOTIFY_REPLY;
  1442. req->in.h.nodeid = outarg->nodeid;
  1443. req->in.numargs = 2;
  1444. req->in.argpages = 1;
  1445. req->page_descs[0].offset = offset;
  1446. req->end = fuse_retrieve_end;
  1447. index = outarg->offset >> PAGE_SHIFT;
  1448. while (num && req->num_pages < num_pages) {
  1449. struct page *page;
  1450. unsigned int this_num;
  1451. page = find_get_page(mapping, index);
  1452. if (!page)
  1453. break;
  1454. this_num = min_t(unsigned, num, PAGE_SIZE - offset);
  1455. req->pages[req->num_pages] = page;
  1456. req->page_descs[req->num_pages].length = this_num;
  1457. req->num_pages++;
  1458. offset = 0;
  1459. num -= this_num;
  1460. total_len += this_num;
  1461. index++;
  1462. }
  1463. req->misc.retrieve_in.offset = outarg->offset;
  1464. req->misc.retrieve_in.size = total_len;
  1465. req->in.args[0].size = sizeof(req->misc.retrieve_in);
  1466. req->in.args[0].value = &req->misc.retrieve_in;
  1467. req->in.args[1].size = total_len;
  1468. err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
  1469. if (err)
  1470. fuse_retrieve_end(fc, req);
  1471. return err;
  1472. }
static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}
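
/*
 * Dispatch an unsolicited notification from the filesystem daemon.
 *
 * A notification is written to the device as a reply whose header has
 * unique == 0, with the notification code carried in the error field
 * and the payload following the header (see fuse_dev_do_write()).
 * Schematically, a poll notification written by the daemon starts with:
 *
 *	struct fuse_out_header oh = {
 *		.len	= sizeof(oh) +
 *			  sizeof(struct fuse_notify_poll_wakeup_out),
 *		.error	= FUSE_NOTIFY_POLL,
 *		.unique	= 0,
 *	};
 *
 * followed by the fuse_notify_poll_wakeup_out payload.  Unknown codes
 * finish the copy state and fail with -EINVAL.
 */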
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}
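
/*
 * Validate and copy the argument part of a reply.  An error reply must
 * consist of the header alone.  Otherwise the reply must match the
 * expected argument sizes exactly, except that the last argument may be
 * short when out->argvar is set (e.g. a FUSE_READ reply carrying fewer
 * bytes than requested); in that case the last argument is shrunk to
 * fit.
 */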
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;

		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then looked up on the processing
 * list by the unique ID found in the header.  If found, it is removed
 * from the list and the rest of the buffer is copied to the request.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_finish;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

err_unlock_pq:
	spin_unlock(&fpq->lock);
err_finish:
	fuse_copy_finish(cs);
	return err;
}
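
/*
 * write_iter() entry point for /dev/fuse: wrap the user iovec in a copy
 * state and hand it to fuse_dev_do_write().
 */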
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}
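
/*
 * splice() path for writing replies.  Collect enough pipe buffers to
 * cover @len bytes (detaching whole buffers from the pipe, taking an
 * extra reference on a partially consumed one), then run the ordinary
 * reply path over them.  With SPLICE_F_MOVE the pages may be moved into
 * the page cache instead of copied.
 */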
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}

out:
	kfree(bufs);
	return ret;
}
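
/*
 * The device is always writable; it becomes readable whenever
 * request_pending() reports queued work, and signals POLLERR once the
 * connection is gone.
 */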
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return POLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * Sets -ECONNABORTED as the error on each request and finishes it with
 * request_end().  Must not be called with fc->lock held: request_end()
 * takes it itself when needed.
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_PENDING, &req->flags);
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}
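
/*
 * Wake up everybody currently waiting in poll on files of this
 * connection, so they can notice that the connection is going away.
 */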
static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);
		LIST_HEAD(to_end2);

		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					list_move(&req->list, &to_end1);
				}
				spin_unlock(&req->waitq.lock);
			}
			list_splice_init(&fpq->processing, &to_end2);
			spin_unlock(&fpq->lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_splice_init(&fiq->pending, &to_end2);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			__fuse_get_request(req);
			list_del_init(&req->list);
			request_end(fc, req);
		}
		end_requests(fc, &to_end2);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
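
/*
 * Release one device clone: abort whatever is still on its processing
 * queue and, if this was the last open device, abort the connection
 * itself.
 */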
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;

		WARN_ON(!list_empty(&fpq->io));
		end_requests(fc, &fpq->processing);
		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}
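
/*
 * Attach a freshly opened /dev/fuse file to an existing connection by
 * giving it its own fuse_dev, and thus its own processing queue.
 */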
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}
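
/*
 * FUSE_DEV_IOC_CLONE takes a pointer to the fd of an already-attached
 * device and clones its connection onto this file.  A daemon wanting a
 * second queue might do, roughly (illustrative userspace sketch, not
 * part of this file; "session_fd" stands for the fd the filesystem was
 * mounted with):
 *
 *	uint32_t masterfd = session_fd;
 *	int clonefd = open("/dev/fuse", O_RDWR);
 *	ioctl(clonefd, FUSE_DEV_IOC_CLONE, &masterfd);
 *	// clonefd now reads requests and writes replies for the same
 *	// connection, via its own processing queue
 */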
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = -ENOTTY;

	if (cmd == FUSE_DEV_IOC_CLONE) {
		int oldfd;

		err = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *) arg)) {
			struct file *old = fget(oldfd);

			err = -EINVAL;
			if (old) {
				struct fuse_dev *fud = NULL;

				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					err = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
	}
	return err;
}
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}