rdwr.c

/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
                                  int sync, void *_key)
{
        struct cachefiles_one_read *monitor =
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
        struct wait_bit_key *key = _key;
        struct page *page = wait->private;

        ASSERT(key);

        _enter("{%lu},%u,%d,{%p,%u}",
               monitor->netfs_page->index, mode, sync,
               key->flags, key->bit_nr);

        if (key->flags != &page->flags ||
            key->bit_nr != PG_locked)
                return 0;

        _debug("--- monitor %p %lx ---", page, page->flags);

        if (!PageUptodate(page) && !PageError(page)) {
                /* unlocked, not uptodate and not erroneous? */
                _debug("page probably truncated");
        }

        /* remove from the waitqueue */
        list_del(&wait->task_list);

        /* move onto the action list and queue for FS-Cache thread pool */
        ASSERT(monitor->op);

        object = container_of(monitor->op->op.object,
                              struct cachefiles_object, fscache);

        spin_lock(&object->work_lock);
        list_add_tail(&monitor->op_link, &monitor->op->to_do);
        spin_unlock(&object->work_lock);
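
        /* the waitqueue callback may run in atomic context, so defer the
         * actual data copy to cachefiles_read_copier() running on FS-Cache's
         * thread pool */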
        fscache_enqueue_retrieval(monitor->op);
        return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
                                   struct cachefiles_one_read *monitor)
{
        struct address_space *bmapping = object->backer->d_inode->i_mapping;
        struct page *backpage = monitor->back_page, *backpage2;
        int ret;

        _enter("{ino=%lx},{%lx,%lx}",
               object->backer->d_inode->i_ino,
               backpage->index, backpage->flags);

        /* skip if the page was truncated away completely */
        if (backpage->mapping != bmapping) {
                _leave(" = -ENODATA [mapping]");
                return -ENODATA;
        }

        backpage2 = find_get_page(bmapping, backpage->index);
        if (!backpage2) {
                _leave(" = -ENODATA [gone]");
                return -ENODATA;
        }

        if (backpage != backpage2) {
                put_page(backpage2);
                _leave(" = -ENODATA [different]");
                return -ENODATA;
        }

        /* the page is still there and we already have a ref on it, so we
         * don't need a second */
        put_page(backpage2);

        INIT_LIST_HEAD(&monitor->op_link);
        add_page_wait_queue(backpage, &monitor->monitor);
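
        /* if we can lock the page, no read is in flight: inspect the result
         * ourselves and reissue the read if it neither completed nor failed */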
        if (trylock_page(backpage)) {
                ret = -EIO;
                if (PageError(backpage))
                        goto unlock_discard;
                ret = 0;
                if (PageUptodate(backpage))
                        goto unlock_discard;

                _debug("reissue read");
                ret = bmapping->a_ops->readpage(NULL, backpage);
                if (ret < 0)
                        goto unlock_discard;
        }

        /* but the page may have been read before the monitor was installed,
         * so the monitor may miss the event - so we have to ensure that we do
         * get one in such a case */
        if (trylock_page(backpage)) {
                _debug("jumpstart %p {%lx}", backpage, backpage->flags);
                unlock_page(backpage);
        }

        /* it'll reappear on the todo list */
        _leave(" = -EINPROGRESS");
        return -EINPROGRESS;

unlock_discard:
        unlock_page(backpage);
        spin_lock_irq(&object->work_lock);
        list_del(&monitor->op_link);
        spin_unlock_irq(&object->work_lock);
        _leave(" = %d", ret);
        return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
        struct cachefiles_one_read *monitor;
        struct cachefiles_object *object;
        struct fscache_retrieval *op;
        struct pagevec pagevec;
        int error, max;

        op = container_of(_op, struct fscache_retrieval, op);
        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);

        _enter("{ino=%lu}", object->backer->d_inode->i_ino);

        pagevec_init(&pagevec, 0);

        max = 8;
        spin_lock_irq(&object->work_lock);

        while (!list_empty(&op->to_do)) {
                monitor = list_entry(op->to_do.next,
                                     struct cachefiles_one_read, op_link);
                list_del(&monitor->op_link);

                spin_unlock_irq(&object->work_lock);

                _debug("- copy {%lu}", monitor->back_page->index);
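
                /* the backing page is now in one of four states: the cookie
                 * is being invalidated, the page is up to date (copy it
                 * across), the read failed (report -EIO), or the page was
                 * probably truncated (reissue the read) */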
        recheck:
                if (test_bit(FSCACHE_COOKIE_INVALIDATING,
                             &object->fscache.cookie->flags)) {
                        error = -ESTALE;
                } else if (PageUptodate(monitor->back_page)) {
                        copy_highpage(monitor->netfs_page, monitor->back_page);
                        fscache_mark_page_cached(monitor->op,
                                                 monitor->netfs_page);
                        error = 0;
                } else if (!PageError(monitor->back_page)) {
                        /* the page has probably been truncated */
                        error = cachefiles_read_reissue(object, monitor);
                        if (error == -EINPROGRESS)
                                goto next;
                        goto recheck;
                } else {
                        cachefiles_io_error_obj(
                                object,
                                "Readpage failed on backing file %lx",
                                (unsigned long) monitor->back_page->flags);
                        error = -EIO;
                }

                page_cache_release(monitor->back_page);

                fscache_end_io(op, monitor->netfs_page, error);
                page_cache_release(monitor->netfs_page);
                fscache_retrieval_complete(op, 1);
                fscache_put_retrieval(op);
                kfree(monitor);

        next:
                /* let the thread pool have some air occasionally */
                max--;
                if (max < 0 || need_resched()) {
                        if (!list_empty(&op->to_do))
                                fscache_enqueue_retrieval(op);
                        _leave(" [maxed out]");
                        return;
                }

                spin_lock_irq(&object->work_lock);
        }

        spin_unlock_irq(&object->work_lock);
        _leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
                                            struct fscache_retrieval *op,
                                            struct page *netpage)
{
        struct cachefiles_one_read *monitor;
        struct address_space *bmapping;
        struct page *newpage, *backpage;
        int ret;

        _enter("");

        _debug("read back %p{%lu,%d}",
               netpage, netpage->index, page_count(netpage));

        monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
        if (!monitor)
                goto nomem;

        monitor->netfs_page = netpage;
        monitor->op = fscache_get_retrieval(op);

        init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

        /* attempt to get hold of the backing page */
        bmapping = object->backer->d_inode->i_mapping;
        newpage = NULL;
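
        /* loop to handle races with other threads installing a page at the
         * same index: -EEXIST from add_to_page_cache_lru() means we lost the
         * race, so go round again and pick up the winner's page */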
        for (;;) {
                backpage = find_get_page(bmapping, netpage->index);
                if (backpage)
                        goto backing_page_already_present;

                if (!newpage) {
                        newpage = __page_cache_alloc(cachefiles_gfp |
                                                     __GFP_COLD);
                        if (!newpage)
                                goto nomem_monitor;
                }

                ret = add_to_page_cache_lru(newpage, bmapping,
                                            netpage->index, cachefiles_gfp);
                if (ret == 0)
                        goto installed_new_backing_page;
                if (ret != -EEXIST)
                        goto nomem_page;
        }

        /* we've installed a new backing page, so now we need to start
         * it reading */
installed_new_backing_page:
        _debug("- new %p", newpage);

        backpage = newpage;
        newpage = NULL;

read_backing_page:
        ret = bmapping->a_ops->readpage(NULL, backpage);
        if (ret < 0)
                goto read_error;

        /* set the monitor to transfer the data across */
monitor_backing_page:
        _debug("- monitor add");

        /* install the monitor */
        page_cache_get(monitor->netfs_page);
        page_cache_get(backpage);
        monitor->back_page = backpage;
        monitor->monitor.private = backpage;
        add_page_wait_queue(backpage, &monitor->monitor);
        monitor = NULL;

        /* but the page may have been read before the monitor was installed,
         * so the monitor may miss the event - so we have to ensure that we do
         * get one in such a case */
        if (trylock_page(backpage)) {
                _debug("jumpstart %p {%lx}", backpage, backpage->flags);
                unlock_page(backpage);
        }
        goto success;

        /* if the backing page is already present, it can be in one of
         * three states: read in progress, read failed or read okay */
backing_page_already_present:
        _debug("- present");

        if (newpage) {
                page_cache_release(newpage);
                newpage = NULL;
        }

        if (PageError(backpage))
                goto io_error;

        if (PageUptodate(backpage))
                goto backing_page_already_uptodate;

        if (!trylock_page(backpage))
                goto monitor_backing_page;
        _debug("read %p {%lx}", backpage, backpage->flags);
        goto read_backing_page;

        /* the backing page is already up to date, attach the netfs
         * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
        _debug("- uptodate");

        fscache_mark_page_cached(op, netpage);

        copy_highpage(netpage, backpage);
        fscache_end_io(op, netpage, 0);
        fscache_retrieval_complete(op, 1);

success:
        _debug("success");
        ret = 0;

out:
        if (backpage)
                page_cache_release(backpage);
        if (monitor) {
                fscache_put_retrieval(monitor->op);
                kfree(monitor);
        }
        _leave(" = %d", ret);
        return ret;

read_error:
        _debug("read error %d", ret);
        if (ret == -ENOMEM) {
                fscache_retrieval_complete(op, 1);
                goto out;
        }
io_error:
        cachefiles_io_error_obj(object, "Page read error on backing file");
        fscache_retrieval_complete(op, 1);
        ret = -ENOBUFS;
        goto out;

nomem_page:
        page_cache_release(newpage);
nomem_monitor:
        fscache_put_retrieval(monitor->op);
        kfree(monitor);
nomem:
        fscache_retrieval_complete(op, 1);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        struct inode *inode;
        sector_t block0, block;
        unsigned shift;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("{%p},{%lx},,,", object, page->index);

        if (!object->backer)
                goto enobufs;

        inode = object->backer->d_inode;
        ASSERT(S_ISREG(inode->i_mode));
        ASSERT(inode->i_mapping->a_ops->bmap);
        ASSERT(inode->i_mapping->a_ops->readpages);

        /* calculate the shift required to use bmap */
        if (inode->i_sb->s_blocksize > PAGE_SIZE)
                goto enobufs;

        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
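
        /* e.g. with 4KB pages on a filesystem using 1KB blocks, the shift is
         * 2 and page index 3 corresponds to backing block 12 */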

        op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;

        pagevec_init(&pagevec, 0);

        /* we assume the absence or presence of the first block is a good
         * enough indication for the page as a whole
         * - TODO: don't use bmap() for this as it is _not_ actually good
         *   enough for this as it doesn't indicate errors, but it's all we've
         *   got for the moment
         */
        block0 = page->index;
        block0 <<= shift;

        block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
        _debug("%llx -> %llx",
               (unsigned long long) block0,
               (unsigned long long) block);

        if (block) {
                /* submit the apparently valid page to the backing fs to be
                 * read from disk */
                ret = cachefiles_read_backing_file_one(object, op, page);
        } else if (cachefiles_has_space(cache, 0, 1) == 0) {
                /* there's space in the cache we can use */
                fscache_mark_page_cached(op, page);
                fscache_retrieval_complete(op, 1);
                ret = -ENODATA;
        } else {
                goto enobufs;
        }

        _leave(" = %d", ret);
        return ret;

enobufs:
        fscache_retrieval_complete(op, 1);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
                                        struct fscache_retrieval *op,
                                        struct list_head *list)
{
        struct cachefiles_one_read *monitor = NULL;
        struct address_space *bmapping = object->backer->d_inode->i_mapping;
        struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
        int ret = 0;

        _enter("");

        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);

                _debug("read back %p{%lu,%d}",
                       netpage, netpage->index, page_count(netpage));

                if (!monitor) {
                        monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
                        if (!monitor)
                                goto nomem;

                        monitor->op = fscache_get_retrieval(op);
                        init_waitqueue_func_entry(&monitor->monitor,
                                                  cachefiles_read_waiter);
                }
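
                /* a monitor is consumed when it gets installed on a backing
                 * page, so a fresh one is allocated lazily for each netfs
                 * page that has to wait */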
                for (;;) {
                        backpage = find_get_page(bmapping, netpage->index);
                        if (backpage)
                                goto backing_page_already_present;

                        if (!newpage) {
                                newpage = __page_cache_alloc(cachefiles_gfp |
                                                             __GFP_COLD);
                                if (!newpage)
                                        goto nomem;
                        }

                        ret = add_to_page_cache_lru(newpage, bmapping,
                                                    netpage->index,
                                                    cachefiles_gfp);
                        if (ret == 0)
                                goto installed_new_backing_page;
                        if (ret != -EEXIST)
                                goto nomem;
                }

                /* we've installed a new backing page, so now we need
                 * to start it reading */
        installed_new_backing_page:
                _debug("- new %p", newpage);

                backpage = newpage;
                newpage = NULL;

        reread_backing_page:
                ret = bmapping->a_ops->readpage(NULL, backpage);
                if (ret < 0)
                        goto read_error;

                /* add the netfs page to the pagecache and LRU, and set the
                 * monitor to transfer the data across */
        monitor_backing_page:
                _debug("- monitor add");

                ret = add_to_page_cache_lru(netpage, op->mapping,
                                            netpage->index, cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
                                page_cache_release(netpage);
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
                        goto nomem;
                }

                /* install a monitor */
                page_cache_get(netpage);
                monitor->netfs_page = netpage;

                page_cache_get(backpage);
                monitor->back_page = backpage;
                monitor->monitor.private = backpage;
                add_page_wait_queue(backpage, &monitor->monitor);
                monitor = NULL;

                /* but the page may have been read before the monitor was
                 * installed, so the monitor may miss the event - so we have
                 * to ensure that we do get one in such a case */
                if (trylock_page(backpage)) {
                        _debug("2unlock %p {%lx}", backpage, backpage->flags);
                        unlock_page(backpage);
                }

                page_cache_release(backpage);
                backpage = NULL;

                page_cache_release(netpage);
                netpage = NULL;
                continue;

                /* if the backing page is already present, it can be in one
                 * of three states: read in progress, read failed or read
                 * okay */
        backing_page_already_present:
                _debug("- present %p", backpage);

                if (PageError(backpage))
                        goto io_error;

                if (PageUptodate(backpage))
                        goto backing_page_already_uptodate;

                _debug("- not ready %p{%lx}", backpage, backpage->flags);

                if (!trylock_page(backpage))
                        goto monitor_backing_page;

                if (PageError(backpage)) {
                        _debug("error %lx", backpage->flags);
                        unlock_page(backpage);
                        goto io_error;
                }

                if (PageUptodate(backpage))
                        goto backing_page_already_uptodate_unlock;

                /* we've locked a page that's neither up to date nor
                 * erroneous, so we need to attempt to read it again */
                goto reread_backing_page;

                /* the backing page is already up to date, attach the netfs
                 * page to the pagecache and LRU and copy the data across */
        backing_page_already_uptodate_unlock:
                _debug("uptodate %lx", backpage->flags);
                unlock_page(backpage);
        backing_page_already_uptodate:
                _debug("- uptodate");

                ret = add_to_page_cache_lru(netpage, op->mapping,
                                            netpage->index, cachefiles_gfp);
                if (ret < 0) {
                        if (ret == -EEXIST) {
                                page_cache_release(netpage);
                                fscache_retrieval_complete(op, 1);
                                continue;
                        }
                        goto nomem;
                }

                copy_highpage(netpage, backpage);

                page_cache_release(backpage);
                backpage = NULL;

                fscache_mark_page_cached(op, netpage);

                /* the netpage is unlocked and marked up to date here */
                fscache_end_io(op, netpage, 0);
                page_cache_release(netpage);
                netpage = NULL;
                fscache_retrieval_complete(op, 1);
                continue;
        }

        netpage = NULL;

        _debug("out");

out:
        /* tidy up */
        if (newpage)
                page_cache_release(newpage);
        if (netpage)
                page_cache_release(netpage);
        if (backpage)
                page_cache_release(backpage);
        if (monitor) {
                fscache_put_retrieval(op);
                kfree(monitor);
        }

        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);
                page_cache_release(netpage);
                fscache_retrieval_complete(op, 1);
        }

        _leave(" = %d", ret);
        return ret;

nomem:
        _debug("nomem");
        ret = -ENOMEM;
        goto record_page_complete;

read_error:
        _debug("read error %d", ret);
        if (ret == -ENOMEM)
                goto record_page_complete;
io_error:
        cachefiles_io_error_obj(object, "Page read error on backing file");
        ret = -ENOBUFS;

record_page_complete:
        fscache_retrieval_complete(op, 1);
        goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
                                   struct list_head *pages,
                                   unsigned *nr_pages,
                                   gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct list_head backpages;
        struct pagevec pagevec;
        struct inode *inode;
        struct page *page, *_n;
        unsigned shift, nrbackpages;
        int ret, ret2, space;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("{OBJ%x,%d},,%d,,",
               object->fscache.debug_id, atomic_read(&op->op.usage),
               *nr_pages);

        if (!object->backer)
                goto all_enobufs;

        space = 1;
        if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
                space = 0;

        inode = object->backer->d_inode;
        ASSERT(S_ISREG(inode->i_mode));
        ASSERT(inode->i_mapping->a_ops->bmap);
        ASSERT(inode->i_mapping->a_ops->readpages);

        /* calculate the shift required to use bmap */
        if (inode->i_sb->s_blocksize > PAGE_SIZE)
                goto all_enobufs;

        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

        pagevec_init(&pagevec, 0);

        op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
        op->op.flags |= FSCACHE_OP_ASYNC;
        op->op.processor = cachefiles_read_copier;

        INIT_LIST_HEAD(&backpages);
        nrbackpages = 0;
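
        /* pages that can't be read from the cache get -ENODATA if we can at
         * least mark them for storage, or -ENOBUFS if caching them is not
         * possible either */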
        ret = space ? -ENODATA : -ENOBUFS;
        list_for_each_entry_safe(page, _n, pages, lru) {
                sector_t block0, block;

                /* we assume the absence or presence of the first block is a
                 * good enough indication for the page as a whole
                 * - TODO: don't use bmap() for this as it is _not_ actually
                 *   good enough for this as it doesn't indicate errors, but
                 *   it's all we've got for the moment
                 */
                block0 = page->index;
                block0 <<= shift;

                block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
                                                      block0);
                _debug("%llx -> %llx",
                       (unsigned long long) block0,
                       (unsigned long long) block);

                if (block) {
                        /* we have data - add it to the list to give to the
                         * backing fs */
                        list_move(&page->lru, &backpages);
                        (*nr_pages)--;
                        nrbackpages++;
                } else if (space && pagevec_add(&pagevec, page) == 0) {
                        fscache_mark_pages_cached(op, &pagevec);
                        fscache_retrieval_complete(op, 1);
                        ret = -ENODATA;
                } else {
                        fscache_retrieval_complete(op, 1);
                }
        }

        if (pagevec_count(&pagevec) > 0)
                fscache_mark_pages_cached(op, &pagevec);

        if (list_empty(pages))
                ret = 0;

        /* submit the apparently valid pages to the backing fs to be read from
         * disk */
        if (nrbackpages > 0) {
                ret2 = cachefiles_read_backing_file(object, op, &backpages);
                if (ret2 == -ENOMEM || ret2 == -EINTR)
                        ret = ret2;
        }

        _leave(" = %d [nr=%u%s]",
               ret, *nr_pages, list_empty(pages) ? " empty" : "");
        return ret;

all_enobufs:
        fscache_retrieval_complete(op, *nr_pages);
        return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
                             struct page *page,
                             gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,{%lx},", object, page->index);

        ret = cachefiles_has_space(cache, 0, 1);
        if (ret == 0)
                fscache_mark_page_cached(op, page);
        else
                ret = -ENOBUFS;

        fscache_retrieval_complete(op, 1);
        _leave(" = %d", ret);
        return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
                              struct list_head *pages,
                              unsigned *nr_pages,
                              gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        struct page *page;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,,,%d,", object, *nr_pages);
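
        /* this is an all-or-nothing decision: either there's space for the
         * whole set of pages or none of them gets marked for caching */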
        ret = cachefiles_has_space(cache, 0, *nr_pages);
        if (ret == 0) {
                pagevec_init(&pagevec, 0);

                list_for_each_entry(page, pages, lru) {
                        if (pagevec_add(&pagevec, page) == 0)
                                fscache_mark_pages_cached(op, &pagevec);
                }

                if (pagevec_count(&pagevec) > 0)
                        fscache_mark_pages_cached(op, &pagevec);
                ret = -ENODATA;
        } else {
                ret = -ENOBUFS;
        }

        fscache_retrieval_complete(op, *nr_pages);
        _leave(" = %d", ret);
        return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        mm_segment_t old_fs;
        struct file *file;
        struct path path;
        loff_t pos, eof;
        size_t len;
        void *data;
        int ret;

        ASSERT(op != NULL);
        ASSERT(page != NULL);

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);

        _enter("%p,%p{%lx},,,", object, page, page->index);

        if (!object->backer) {
                _leave(" = -ENOBUFS");
                return -ENOBUFS;
        }

        ASSERT(S_ISREG(object->backer->d_inode->i_mode));

        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        /* write the page to the backing filesystem and let it store it in its
         * own time */
        path.mnt = cache->mnt;
        path.dentry = object->backer;
        file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
        } else {
                ret = -EIO;
                if (file->f_op->write) {
                        pos = (loff_t) page->index << PAGE_SHIFT;

                        /* we mustn't write more data than we have, so we have
                         * to beware of a partial page at EOF */
                        eof = object->fscache.store_limit_l;
                        len = PAGE_SIZE;
                        if (eof & ~PAGE_MASK) {
                                ASSERTCMP(pos, <, eof);
                                if (eof - pos < PAGE_SIZE) {
                                        _debug("cut short %llx to %llx",
                                               pos, eof);
                                        len = eof - pos;
                                        ASSERTCMP(pos + len, ==, eof);
                                }
                        }
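
                        /* ->write() expects a user-space pointer, so widen
                         * the address limit with set_fs(KERNEL_DS) to let the
                         * kernel mapping of the page be passed instead */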
                        data = kmap(page);
                        file_start_write(file);
                        old_fs = get_fs();
                        set_fs(KERNEL_DS);
                        ret = file->f_op->write(
                                file, (const void __user *) data, len, &pos);
                        set_fs(old_fs);
                        kunmap(page);
                        file_end_write(file);
                        if (ret != len)
                                ret = -EIO;
                }
                fput(file);
        }

        if (ret < 0) {
                if (ret == -EIO)
                        cachefiles_io_error_obj(
                                object, "Write page to backing file failed");
                ret = -ENOBUFS;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;

        object = container_of(_object, struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,{%lu}", object, page->index);
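
        /* the fscache core calls this op with the cookie's lock held and
         * relies on the cache backend to drop it */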
        spin_unlock(&object->fscache.cookie->lock);
}