/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>
#include <linux/sched.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");

static struct kmem_cache *fuse_req_cachep;

static struct fuse_dev *fuse_get_dev(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return ACCESS_ONCE(file->private_data);
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	refcount_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

void __fuse_get_request(struct fuse_req *req)
{
	refcount_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	refcount_dec(&req->count);
}

static void fuse_req_init_context(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}
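
/*
 * Allocate a request and charge it to the connection.  Callers block
 * until the FUSE_INIT reply has arrived; background requests also wait
 * while fc->blocked is set, which throttles them against a slow or
 * not-yet-started userspace daemon.
 */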
static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		err = -EINTR;
		if (wait_event_killable_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background)))
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	err = -ECONNREFUSED;
	if (fc->conn_error)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);

/*
 * Return request in fuse_file->reserved_req.  However, that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(fc, req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (refcount_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
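
/*
 * Unique request IDs come from a plain counter; all callers hold
 * fiq->waitq.lock, so no atomic operations are needed here.
 */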
static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
}
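
/*
 * Move queued background requests to the input queue, never letting
 * more than fc->max_background of them be active at once.
 */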
static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released.
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (test_and_set_bit(FR_FINISHED, &req->flags))
		return;

	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		spin_lock(&fc->lock);
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->sb) {
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
		spin_unlock(&fc->lock);
	}
	wake_up(&req->waitq);
	if (req->end)
		req->end(fc, req);
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}
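
/*
 * Wait for a request to complete, in up to three phases: first an
 * interruptible wait (any signal queues a FUSE_INTERRUPT), then a
 * killable wait (a fatal signal dequeues the request if it is still
 * pending), and finally an uninterruptible wait once the request has
 * reached userspace or was forced.
 */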
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		/* Only fatal signals may interrupt this */
		err = wait_event_killable(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
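
/*
 * A minimal caller sketch (illustrative only, modeled on users of
 * fuse_simple_request() elsewhere in fs/fuse; FUSE_ARGS() is from
 * fuse_i.h): fill in a fuse_args on the stack, point the argument
 * slots at in/out structures, and send.
 *
 *	FUSE_ARGS(args);
 *	struct fuse_getattr_in inarg;
 *	struct fuse_attr_out outarg;
 *
 *	memset(&inarg, 0, sizeof(inarg));
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = nodeid;
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	args.out.numargs = 1;
 *	args.out.args[0].size = sizeof(outarg);
 *	args.out.args[0].value = &outarg;
 *	err = fuse_simple_request(fc, &args);
 */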
/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold && fc->sb) {
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
		set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);

	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}
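
/*
 * State for copying request data between the kernel and userspace.
 * The data source/sink is either an iov_iter (plain read/write on the
 * device file) or a set of pipe buffers (splice); pg/offset/len track
 * the current position within the page being copied.
 */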
struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pageful of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = pipe_buf_confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}
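
/*
 * Zero-copy path for splice: instead of copying the data, try to steal
 * the pipe buffer's page and install it in the page cache in place of
 * the old page.  Falls back to an ordinary copy (return value 1) if
 * the page cannot be stolen safely.
 */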
static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = pipe_buf_confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (pipe_buf_steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	get_page(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		put_page(newpage);
		return err;
	}

	unlock_page(oldpage);
	put_page(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
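
/*
 * Readiness checks for the input queue: a reader has work if there is
 * a pending request, a queued interrupt, or at least one entry on the
 * dedicated forget list.
 */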
static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fiq->waitq.lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_iqueue *fiq,
			       struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fiq->waitq.lock)
{
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}
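
/*
 * Detach up to 'max' forgets from the singly linked forget list and
 * return the head of the detached chain; *countp (if non-NULL) is set
 * to the number of entries actually taken.
 */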
static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_iqueue *fiq,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fiq->waitq.lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}

		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
			    struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fiq->waitq.lock)
{
	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fiq, cs, nbytes);
	else
		return fuse_read_batch_forget(fiq, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	ssize_t err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	err = wait_event_interruptible_exclusive_locked(fiq->waitq,
				!fiq->connected || request_pending(fiq));
	if (err)
		goto err_unlock;

	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fiq, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, fiq, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	in = &req->in;
	reqsize = in->h.len;

	if (task_active_pid_ns(current) != fc->pid_ns) {
		rcu_read_lock();
		in->h.pid = pid_vnr(find_pid_ns(in->h.pid, fc->pid_ns));
		rcu_read_unlock();
	}

	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_lock(&fpq->lock);
	list_add(&req->list, &fpq->io);
	spin_unlock(&fpq->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected) {
		err = -ENODEV;
		goto out_end;
	}
	if (err) {
		req->out.h.error = -EIO;
		goto out_end;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		err = reqsize;
		goto out_end;
	}
	list_move_tail(&req->list, &fpq->processing);
	spin_unlock(&fpq->lock);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);

	return reqsize;

out_end:
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);
	request_end(fc, req);
	return err;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	return err;
}

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fud, file, &cs, iov_iter_count(to));
}
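
/*
 * splice(2) read from the device: the request is copied into newly
 * allocated pipe buffers, which are then handed to the pipe using
 * nosteal_pipe_buf_ops (core kernel code) so that no ops pointer into
 * this module is left behind in the pipe after the read returns.
 */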
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int total, ret;
	int page_nr = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(in);

	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fud, in, &cs, len);
	if (ret < 0)
		goto out;

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out;
	}

	for (ret = total = 0; page_nr < cs.nr_segs; total += ret) {
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		bufs[page_nr].ops = &nosteal_pipe_buf_ops;
		bufs[page_nr].flags = 0;
		ret = add_to_pipe(pipe, &bufs[page_nr++]);
		if (unlikely(ret < 0))
			break;
	}
	if (total)
		ret = total;
out:
	for (; page_nr < cs.nr_segs; page_nr++)
		put_page(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_SHIFT;
	offset = outarg.offset & ~PAGE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		put_page(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}
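
/*
 * Dispatch an unsolicited notification (a reply written with a zero
 * unique ID) to its handler, based on the notify code carried in the
 * header's error field.
 */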
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fpq->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}
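
/*
 * Copy the reply arguments from the write buffer into the request.
 * Error replies carry no arguments; a short reply is accepted only if
 * the last argument is variable sized (out->argvar), in which case it
 * is shrunk to fit.
 */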
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;

		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then looked up on the processing
 * list by the unique ID found in the header.  If found, the request is
 * removed from the list and the rest of the buffer is copied into it.
 * The request is finished by calling request_end().
 */
static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_conn *fc = fud->fc;
	struct fuse_pqueue *fpq = &fud->pq;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fpq->lock);
	err = -ENOENT;
	if (!fpq->connected)
		goto err_unlock_pq;

	req = request_find(fpq, oh.unique);
	if (!req)
		goto err_unlock_pq;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		spin_unlock(&fpq->lock);

		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_finish;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);

		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fpq->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fpq->lock);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fpq->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fpq->connected)
		err = -ENOENT;
	else if (err)
		req->out.h.error = -EIO;
	if (!test_bit(FR_PRIVATE, &req->flags))
		list_del_init(&req->list);
	spin_unlock(&fpq->lock);

	request_end(fc, req);

	return err ? err : nbytes;

err_unlock_pq:
	spin_unlock(&fpq->lock);
err_finish:
	fuse_copy_finish(cs);
	return err;
}
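
/* write(2) on the fuse device: copy a reply out of the iov iterator */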
static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_dev *fud = fuse_get_dev(iocb->ki_filp);

	if (!fud)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fud, &cs, iov_iter_count(from));
}
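
/*
 * splice(2) onto the fuse device: gather the pipe buffers covering
 * "len" bytes and feed them to fuse_dev_do_write().  With SPLICE_F_MOVE
 * the pages may be moved into the page cache instead of being copied.
 */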
static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_dev *fud;
	size_t rem;
	ssize_t ret;

	fud = fuse_get_dev(out);
	if (!fud)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			pipe_buf_get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fud, &cs, len);

	for (idx = 0; idx < nbuf; idx++)
		pipe_buf_release(pipe, &bufs[idx]);

out:
	kfree(bufs);
	return ret;
}
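
/*
 * The device is always writable; it becomes readable when a request
 * is pending on the input queue.
 */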
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return POLLERR;

	fiq = &fud->fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * Called with no locks held; request_end() takes the locks it needs
 * itself, so fc->lock in particular must not be held here.
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_SENT, &req->flags);
		list_del_init(&req->list);
		request_end(fc, req);
	}
}
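
/*
 * Wake up everybody sleeping in poll() on a file of this connection,
 * so they notice that the connection is going away.
 */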
static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we
 * touch it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_dev *fud;
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);
		LIST_HEAD(to_end2);

		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		list_for_each_entry(fud, &fc->devices, entry) {
			struct fuse_pqueue *fpq = &fud->pq;

			spin_lock(&fpq->lock);
			fpq->connected = 0;
			list_for_each_entry_safe(req, next, &fpq->io, list) {
				req->out.h.error = -ECONNABORTED;
				spin_lock(&req->waitq.lock);
				set_bit(FR_ABORTED, &req->flags);
				if (!test_bit(FR_LOCKED, &req->flags)) {
					set_bit(FR_PRIVATE, &req->flags);
					list_move(&req->list, &to_end1);
				}
				spin_unlock(&req->waitq.lock);
			}
			list_splice_init(&fpq->processing, &to_end2);
			spin_unlock(&fpq->lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_splice_init(&fiq->pending, &to_end2);
		list_for_each_entry(req, &to_end2, list)
			clear_bit(FR_PENDING, &req->flags);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
		spin_unlock(&fc->lock);

		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			__fuse_get_request(req);
			list_del_init(&req->list);
			request_end(fc, req);
		}
		end_requests(fc, &to_end2);
	} else {
		spin_unlock(&fc->lock);
	}
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);
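
/*
 * Release a device clone: abort the requests still being processed on
 * it and, if this was the last open device, abort the connection
 * itself.
 */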
int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (fud) {
		struct fuse_conn *fc = fud->fc;
		struct fuse_pqueue *fpq = &fud->pq;

		WARN_ON(!list_empty(&fpq->io));
		end_requests(fc, &fpq->processing);
		/* Are we the last open device? */
		if (atomic_dec_and_test(&fc->dev_count)) {
			WARN_ON(fc->iq.fasync != NULL);
			fuse_abort_conn(fc);
		}
		fuse_dev_free(fud);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_dev *fud = fuse_get_dev(file);

	if (!fud)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fud->fc->iq.fasync);
}
static int fuse_device_clone(struct fuse_conn *fc, struct file *new)
{
	struct fuse_dev *fud;

	if (new->private_data)
		return -EINVAL;

	fud = fuse_dev_alloc(fc);
	if (!fud)
		return -ENOMEM;

	new->private_data = fud;
	atomic_inc(&fc->dev_count);

	return 0;
}
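
/*
 * FUSE_DEV_IOC_CLONE: clone the connection of an already set up fuse
 * device onto this file.  Illustrative userspace usage (sketch;
 * "orig_fd" stands for the fd of the original device, error handling
 * omitted):
 *
 *	int clone_fd = open("/dev/fuse", O_RDWR);
 *	ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &orig_fd);
 */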
static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	int err = -ENOTTY;

	if (cmd == FUSE_DEV_IOC_CLONE) {
		int oldfd;

		err = -EFAULT;
		if (!get_user(oldfd, (__u32 __user *) arg)) {
			struct file *old = fget(oldfd);

			err = -EINVAL;
			if (old) {
				struct fuse_dev *fud = NULL;

				/*
				 * Check against file->f_op because CUSE
				 * uses the same ioctl handler.
				 */
				if (old->f_op == file->f_op &&
				    old->f_cred->user_ns == file->f_cred->user_ns)
					fud = fuse_get_dev(old);

				if (fud) {
					mutex_lock(&fuse_mutex);
					err = fuse_device_clone(fud->fc, file);
					mutex_unlock(&fuse_mutex);
				}
				fput(old);
			}
		}
	}
	return err;
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
	.unlocked_ioctl = fuse_dev_ioctl,
	.compat_ioctl   = fuse_dev_ioctl,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
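
/* Create the request slab cache and register the "fuse" misc device */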
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
out:
	return err;
}
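
/* Undo fuse_dev_init(): deregister the device and free the slab cache */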
void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}