page.c

/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

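/* Example (illustrative sketch, not part of this file): a netfs that must not
 * tear a page down while the cache is still writing it - say in an
 * invalidation or launder path - would typically go through the linux/fscache.h
 * wrappers for the two functions above, roughly:
 *
 *      static void mynetfs_flush_page(struct fscache_cookie *cookie,
 *                                     struct page *page)
 *      {
 *              if (fscache_check_page_write(cookie, page))
 *                      fscache_wait_on_page_write(cookie, page);
 *              fscache_uncache_page(cookie, page);
 *      }
 *
 * mynetfs_flush_page() is a hypothetical helper; the fscache_* wrappers are
 * the real netfs-facing API.
 */
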
/*
 * wait for a page to finish being written to the cache. Put a timeout here
 * since we might be called recursively via parent fs.
 */
static
bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
                                  HZ);
}

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

try_again:
        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* We will wait here if we're allowed to, but that could deadlock the
         * allocator as the work threads writing to the cache may all end up
         * sleeping on memory allocation, so we may need to impose a timeout
         * too. */
        if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
                fscache_stat(&fscache_n_store_vmscan_busy);
                return false;
        }

        fscache_stat(&fscache_n_store_vmscan_wait);
        if (!release_page_wait_timeout(cookie, page))
                _debug("fscache writeout timeout page: %p{%lx}",
                       page, page->index);

        gfp &= ~__GFP_WAIT;
        goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);

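/* Example (illustrative sketch): __fscache_maybe_release_page() is normally
 * reached from a netfs ->releasepage() implementation via the
 * fscache_maybe_release_page() wrapper in linux/fscache.h, roughly:
 *
 *      static int mynetfs_releasepage(struct page *page, gfp_t gfp)
 *      {
 *              struct mynetfs_inode *ni = MYNETFS_I(page->mapping->host);
 *
 *              if (PageFsCache(page) &&
 *                  !fscache_maybe_release_page(ni->cookie, page, gfp))
 *                      return 0;       /* cache still busy with the page */
 *              return 1;               /* page may be released */
 *      }
 *
 * mynetfs_* and MYNETFS_I() are hypothetical names standing in for a real
 * netfs; the wrapper and its true/false convention are the real API.
 */
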
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                if (ret < 0)
                        fscache_abort_object(object);
        }

        fscache_op_complete(op, true);
        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;
        bool wake_cookie = false;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL);
        op->flags = FSCACHE_OP_ASYNC |
                (1 << FSCACHE_OP_EXCLUSIVE) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        __fscache_use_cookie(cookie);
        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs_dec;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
        spin_unlock(&cookie->lock);
        fscache_put_operation(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);

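/* Example (illustrative sketch): a netfs would typically report attribute
 * changes that affect the cache object - e.g. a size change after truncate -
 * through the fscache_attr_changed() wrapper:
 *
 *      static int mynetfs_post_setattr(struct mynetfs_inode *ni)
 *      {
 *              return fscache_attr_changed(ni->cookie);
 *      }
 *
 * mynetfs_* names are hypothetical; -ENOBUFS here just means the change
 * couldn't be queued against a backing object and can usually be ignored.
 */
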
/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        atomic_set(&op->n_pages, 0);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
                    atomic_read(&op->n_pages), ==, 0);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct fscache_cookie *cookie,
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL,
                               fscache_do_cancel_retrieval,
                               fscache_release_retrieval_op);
        op->op.flags    = FSCACHE_OP_MYTHREAD |
                (1UL << FSCACHE_OP_WAITING) |
                (1UL << FSCACHE_OP_UNUSE_COOKIE);
        op->cookie      = cookie;
        op->mapping     = mapping;
        op->end_io_func = end_io_func;
        op->context     = context;
        op->start_time  = jiffies;
        INIT_LIST_HEAD(&op->to_do);

        /* Pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure.
         */
        if (context)
                fscache_get_context(op->cookie, context);
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * wait for an object to become active (or dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
                                          struct fscache_operation *op,
                                          atomic_t *stat_op_waits,
                                          atomic_t *stat_object_dead)
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
                goto check_if_dead;

        _debug(">>> WT");
        if (stat_op_waits)
                fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
                        TASK_INTERRUPTIBLE) != 0) {
                ret = fscache_cancel_op(op, false);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
                            TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (op->state == FSCACHE_OP_ST_CANCELLED) {
                if (stat_object_dead)
                        fscache_stat(stat_object_dead);
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
        if (unlikely(fscache_object_is_dying(object) ||
                     fscache_cache_is_broken(object))) {
                enum fscache_operation_state state = op->state;
                fscache_cancel_op(op, true);
                if (stat_object_dead)
                        fscache_stat(stat_object_dead);
                _leave(" = -ENOBUFS [obj dead %d]", state);
                return -ENOBUFS;
        }
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   -ENODATA     - no data available in the backing object for this block
 *   0            - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(cookie, page->mapping,
                                     end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        atomic_set(&op->n_pages, 1);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

        __fscache_use_cookie(cookie);
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        fscache_put_retrieval(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);

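/* Example (illustrative sketch): a netfs ->readpage() would typically try the
 * cache first through the fscache_read_or_alloc_page() wrapper and fall back
 * to a server read if the cache can't satisfy it, following the return codes
 * documented above:
 *
 *      static int mynetfs_readpage(struct file *file, struct page *page)
 *      {
 *              struct mynetfs_inode *ni = MYNETFS_I(page->mapping->host);
 *              int ret;
 *
 *              ret = fscache_read_or_alloc_page(ni->cookie, page,
 *                                               mynetfs_read_from_cache_done,
 *                                               NULL, GFP_KERNEL);
 *              if (ret == 0)
 *                      return 0;       /* read dispatched; end_io_func finishes it */
 *              if (ret == -ERESTARTSYS)
 *                      return ret;
 *              return mynetfs_readpage_from_server(file, page);
 *      }
 *
 * mynetfs_* names are hypothetical; -ENODATA/-ENOBUFS both mean "go to the
 * server" here.
 */
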
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM      - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS     - no backing object or space available in which to cache any
 *                  pages not being read
 *   -ENODATA     - no data available in the backing object for some or all of
 *                  the pages
 *   0            - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        atomic_set(&op->n_pages, *nr_pages);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        __fscache_use_cookie(cookie);
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        fscache_put_retrieval(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

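/* Example (illustrative sketch): in a netfs ->readpages(), the
 * fscache_read_or_alloc_pages() wrapper consumes from *pages and *nr_pages
 * any pages it dispatches reads for; whatever remains is fetched from the
 * server, roughly:
 *
 *      ret = fscache_read_or_alloc_pages(ni->cookie, mapping, pages, &nr_pages,
 *                                        mynetfs_read_from_cache_done,
 *                                        NULL, mapping_gfp_mask(mapping));
 *      if (ret == -ENOMEM || ret == -ERESTARTSYS)
 *              return ret;
 *      if (nr_pages > 0)
 *              ret = mynetfs_readpages_from_server(file, mapping,
 *                                                  pages, nr_pages);
 *
 * mynetfs_* names are hypothetical.
 */
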
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   0            - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        atomic_set(&op->n_pages, 1);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        fscache_put_retrieval(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_readpages_or_alloc) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
                                struct list_head *pages)
{
        struct page *page;

        list_for_each_entry(page, pages, lru) {
                if (PageFsCache(page))
                        __fscache_uncache_page(cookie, page);
        }
}
EXPORT_SYMBOL(__fscache_readpages_cancel);

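/* Example (illustrative sketch): if a netfs aborts its ->readpages() after
 * some pages were already marked for caching, it should unmark the leftovers
 * through the fscache_readpages_cancel() wrapper before bailing out:
 *
 *      ret = mynetfs_send_read_rpc(ni, pages);
 *      if (ret < 0)
 *              fscache_readpages_cancel(ni->cookie, pages);
 *
 * mynetfs_send_read_rpc() is a hypothetical server-read helper.
 */
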
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object)) {
                /* If we get here, then the on-disk cache object likely no
                 * longer exists, so we should just cancel this write
                 * operation.
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
                _leave(" [inactive]");
                return;
        }

        if (!cookie) {
                /* If we get here, then the cookie belonging to the object was
                 * detached, probably by the cookie being withdrawn due to
                 * memory pressure, which means that the pages we might write
                 * to the cache from no longer exist - therefore, we can just
                 * cancel this write operation.
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
                _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
                       _op->flags, _op->state, object->state->short_name,
                       object->flags);
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_end_page_write(object, page);
        if (ret < 0) {
                fscache_abort_object(object);
                fscache_op_complete(&op->op, true);
        } else {
                fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        fscache_op_complete(&op->op, true);
        _leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
        struct page *page;
        void *results[16];
        int n, i;

        _enter("");

        for (;;) {
                spin_lock(&cookie->stores_lock);
                n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
                                               ARRAY_SIZE(results),
                                               FSCACHE_COOKIE_PENDING_TAG);
                if (n == 0) {
                        spin_unlock(&cookie->stores_lock);
                        break;
                }

                for (i = n - 1; i >= 0; i--) {
                        page = results[i];
                        radix_tree_delete(&cookie->stores, page->index);
                }

                spin_unlock(&cookie->stores_lock);

                for (i = n - 1; i >= 0; i--)
                        page_cache_release(results[i]);
        }

        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM - out of memory, nothing done
 *   -ENOBUFS - no backing object available in which to cache the page
 *   0 - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op, NULL,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC |
                (1 << FSCACHE_OP_WAITING) |
                (1 << FSCACHE_OP_UNUSE_COOKIE);

        ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_put_operation(&op->op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        wake_cookie = __fscache_unuse_cookie(cookie);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_put_operation(&op->op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        fscache_put_operation(&op->op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);

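/* Example (illustrative sketch): once a netfs has a page of data uptodate
 * (e.g. in its read-completion path, with PG_fscache set via
 * fscache_mark_page_cached()), it would typically hand the page to the cache
 * through the fscache_write_page() wrapper and drop the mark on failure:
 *
 *      static void mynetfs_write_to_cache(struct fscache_cookie *cookie,
 *                                         struct page *page)
 *      {
 *              int ret = fscache_write_page(cookie, page, GFP_KERNEL);
 *              if (ret != 0)
 *                      fscache_uncache_page(cookie, page);
 *      }
 *
 * mynetfs_write_to_cache() is a hypothetical helper; the wrappers are the
 * real netfs-facing API.
 */
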
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached. After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
        struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
        atomic_inc(&fscache_n_marks);
#endif

        _debug("- mark %p{%lx}", page, page->index);
        if (TestSetPageFsCache(page)) {
                static bool once_only;
                if (!once_only) {
                        once_only = true;
                        pr_warn("Cookie type %s marked page %lx multiple times\n",
                                cookie->def->name, page->index);
                }
        }

        if (cookie->def->mark_page_cached)
                cookie->def->mark_page_cached(cookie->netfs_data,
                                              op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached. After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        unsigned long loop;

        for (loop = 0; loop < pagevec->nr; loop++)
                fscache_mark_page_cached(op, pagevec->pages[loop]);

        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                                       struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t next;
        int i;

        _enter("%p,%p", cookie, inode);

        if (!mapping || mapping->nrpages == 0) {
                _leave(" [no pages]");
                return;
        }

        pagevec_init(&pvec, 0);
        next = 0;
        do {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        } while (++next);

        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
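
/* Example (illustrative sketch): a netfs commonly calls the wrapper for the
 * function above from its inode-eviction path before relinquishing the
 * cookie:
 *
 *      static void mynetfs_evict_inode(struct inode *inode)
 *      {
 *              struct mynetfs_inode *ni = MYNETFS_I(inode);
 *
 *              fscache_uncache_all_inode_pages(ni->cookie, inode);
 *              fscache_relinquish_cookie(ni->cookie, 0);
 *              ni->cookie = NULL;
 *      }
 *
 * mynetfs_* names are hypothetical; fscache_relinquish_cookie() is the real
 * API for discarding a cookie once the inode's pages are uncached.
 */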