/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"
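
/*
 * Pages being written to the cache are tracked in cookie->stores, a radix
 * tree indexed by page->index.  The tree holds a reference on each page it
 * contains.  A page awaiting storage carries FSCACHE_COOKIE_PENDING_TAG;
 * whilst it is actually being written out, the tag is switched to
 * FSCACHE_COOKIE_STORING_TAG, and the page is removed from the tree (and
 * its reference dropped) when the write completes or is cancelled.
 */
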
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

try_again:
        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();
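
        /* taking stores_lock before dropping the RCU read lock keeps the
         * tree stable so that the tag can be rechecked: a writer may have
         * picked the page up in the meantime */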
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* We will wait here if we're allowed to, but that could deadlock the
         * allocator as the work threads writing to the cache may all end up
         * sleeping on memory allocation, so we may need to impose a timeout
         * too. */
        if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
                fscache_stat(&fscache_n_store_vmscan_busy);
                return false;
        }

        fscache_stat(&fscache_n_store_vmscan_wait);
        __fscache_wait_on_page_write(cookie, page);
        gfp &= ~__GFP_WAIT;     /* only wait once; retry without blocking */
        goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                if (ret < 0)
                        fscache_abort_object(object);
        }

        fscache_op_complete(op, true);
        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;
        bool wake_cookie = false;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL);
        op->flags = FSCACHE_OP_ASYNC |
                    (1 << FSCACHE_OP_EXCLUSIVE) |
                    (1 << FSCACHE_OP_UNUSE_COOKIE);
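
        /* the op runs asynchronously on the work queue, excludes other ops
         * on the object whilst it runs, and holds a use on the cookie that
         * is dropped on completion */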

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        __fscache_use_cookie(cookie);
        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs_dec;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        ASSERTCMP(atomic_read(&op->n_pages), ==, 0);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct fscache_cookie *cookie,
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
        op->op.flags = FSCACHE_OP_MYTHREAD |
                       (1UL << FSCACHE_OP_WAITING) |
                       (1UL << FSCACHE_OP_UNUSE_COOKIE);
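
        /* FSCACHE_OP_MYTHREAD: the op is processed by the submitting thread
         * rather than by the work queue; FSCACHE_OP_WAITING indicates the
         * submitter will wait for the op to become active */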

        op->mapping = mapping;
        op->end_io_func = end_io_func;
        op->context = context;
        op->start_time = jiffies;
        INIT_LIST_HEAD(&op->to_do);
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        /* no pages will now be dealt with by this op; clearing the count
         * also satisfies the assertion in fscache_release_retrieval_op() */
        atomic_set(&op->n_pages, 0);
}

/*
 * wait for an object to become active (or dead)
 */
int fscache_wait_for_operation_activation(struct fscache_object *object,
                                          struct fscache_operation *op,
                                          atomic_t *stat_op_waits,
                                          atomic_t *stat_object_dead,
                                          void (*do_cancel)(struct fscache_operation *))
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
                goto check_if_dead;

        _debug(">>> WT");
        if (stat_op_waits)
                fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                ret = fscache_cancel_op(op, do_cancel);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (op->state == FSCACHE_OP_ST_CANCELLED) {
                if (stat_object_dead)
                        fscache_stat(stat_object_dead);
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
        if (unlikely(fscache_object_is_dead(object))) {
                pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
                fscache_cancel_op(op, do_cancel);
                if (stat_object_dead)
                        fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   -ENODATA     - no data available in the backing object for this block
 *   0            - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(cookie, page->mapping,
                                     end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        atomic_set(&op->n_pages, 1);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

        __fscache_use_cookie(cookie);
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
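
        /* the object's read count was raised above; FSCACHE_OP_DEC_READ_CNT
         * arranges for it to be dropped again when the op completes */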

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead),
                fscache_do_cancel_retrieval);
        if (ret < 0)
                goto error;
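
        /* the cache backend will call end_io_func() for the page when the
         * read completes; a failed read leaves the netfs to fetch the page
         * itself, using the context pinned above */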
        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM      - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS     - no backing object or space available in which to cache any
 *                  pages not being read
 *   -ENODATA     - no data available in the backing object for some or all of
 *                  the pages
 *   0            - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        atomic_set(&op->n_pages, *nr_pages);
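
        /* n_pages counts the pages still outstanding on this op; it must
         * reach zero (pages read, allocated or cancelled) before the op can
         * be released */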

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        __fscache_use_cookie(cookie);
        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead),
                fscache_do_cancel_retrieval);
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   0            - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;
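
        /* allocation reuses the retrieval op machinery; no data is to be
         * read, so no end_io function or netfs context is needed */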
        op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        atomic_set(&op->n_pages, 1);

        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_operation_activation(
                object, &op->op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead),
                fscache_do_cancel_retrieval);
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        wake_cookie = __fscache_unuse_cookie(cookie);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * Unmark pages allocated in the readahead code path (via:
 * fscache_readpages_or_alloc) after delegating to the base filesystem
 */
void __fscache_readpages_cancel(struct fscache_cookie *cookie,
                                struct list_head *pages)
{
        struct page *page;

        list_for_each_entry(page, pages, lru) {
                if (PageFsCache(page))
                        __fscache_uncache_page(cookie, page);
        }
}
EXPORT_SYMBOL(__fscache_readpages_cancel);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object)) {
                /* If we get here, then the on-disk cache object likely no
                 * longer exists, so we should just cancel this write
                 * operation.
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
                _leave(" [inactive]");
                return;
        }

        if (!cookie) {
                /* If we get here, then the cookie belonging to the object was
                 * detached, probably by the cookie being withdrawn due to
                 * memory pressure, which means that the pages we might write
                 * to the cache no longer exist - therefore, we can just
                 * cancel this write operation.
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
                _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
                       _op->flags, _op->state, object->state->short_name,
                       object->flags);
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);
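
        /* the page is now in the STORING state: vmscan can no longer cancel
         * the write, and fscache_end_page_write() will remove it from the
         * tree once the backend has finished with it */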
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_end_page_write(object, page);
        if (ret < 0) {
                fscache_abort_object(object);
                fscache_op_complete(&op->op, true);
        } else {
                fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        fscache_op_complete(&op->op, true);
        _leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
        struct page *page;
        void *results[16];
        int n, i;

        _enter("");

        for (;;) {
                spin_lock(&cookie->stores_lock);
                n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
                                               ARRAY_SIZE(results),
                                               FSCACHE_COOKIE_PENDING_TAG);
                if (n == 0) {
                        spin_unlock(&cookie->stores_lock);
                        break;
                }

                for (i = n - 1; i >= 0; i--) {
                        page = results[i];
                        radix_tree_delete(&cookie->stores, page->index);
                }

                spin_unlock(&cookie->stores_lock);
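
                /* the page references held by the stores tree are dropped
                 * outside the lock */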
                for (i = n - 1; i >= 0; i--)
                        page_cache_release(results[i]);
        }

        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM      - out of memory, nothing done
 *   -ENOBUFS     - no backing object available in which to cache the page
 *   0            - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        bool wake_cookie = false;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC |
                       (1 << FSCACHE_OP_WAITING) |
                       (1 << FSCACHE_OP_UNUSE_COOKIE);
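
        /* preload the radix tree so that the later radix_tree_insert()
         * cannot fail for want of memory whilst the spinlocks are held */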
        ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (!fscache_cookie_enabled(cookie) ||
            hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        __fscache_use_cookie(cookie);
        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        wake_cookie = __fscache_unuse_cookie(cookie);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        if (wake_cookie)
                __fscache_wake_unused_cookie(cookie);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
        struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
        atomic_inc(&fscache_n_marks);
#endif

        _debug("- mark %p{%lx}", page, page->index);
        if (TestSetPageFsCache(page)) {
                static bool once_only;
                if (!once_only) {
                        once_only = true;
                        pr_warn("Cookie type %s marked page %lx multiple times\n",
                                cookie->def->name, page->index);
                }
        }

        if (cookie->def->mark_page_cached)
                cookie->def->mark_page_cached(cookie->netfs_data,
                                              op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        unsigned long loop;

        for (loop = 0; loop < pagevec->nr; loop++)
                fscache_mark_page_cached(op, pagevec->pages[loop]);

        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                                       struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t next;
        int i;

        _enter("%p,%p", cookie, inode);

        if (!mapping || mapping->nrpages == 0) {
                _leave(" [no pages]");
                return;
        }

        pagevec_init(&pvec, 0);
        next = 0;
        do {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        } while (++next);

        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);