/*
 * linux/fs/nfs/pagelist.c
 *
 * A set of helper functions for managing NFS read and write requests.
 * The main purpose of these routines is to provide support for the
 * coalescing of several requests into a single RPC call.
 *
 * Copyright 2000, 2001 (c) Trond Myklebust <trond.myklebust@fys.uio.no>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs3.h>
#include <linux/nfs4.h>
#include <linux/nfs_page.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/export.h>

#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static struct kmem_cache *nfs_page_cachep;
static const struct rpc_call_ops nfs_pgio_common_ops;

struct nfs_pgio_mirror *
nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc)
{
	return nfs_pgio_has_mirroring(desc) ?
		&desc->pg_mirrors[desc->pg_mirror_idx] :
		&desc->pg_mirrors[0];
}
EXPORT_SYMBOL_GPL(nfs_pgio_current_mirror);

void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
		       struct nfs_pgio_header *hdr,
		       void (*release)(struct nfs_pgio_header *hdr))
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	hdr->req = nfs_list_entry(mirror->pg_list.next);
	hdr->inode = desc->pg_inode;
	hdr->cred = hdr->req->wb_context->cred;
	hdr->io_start = req_offset(hdr->req);
	hdr->good_bytes = mirror->pg_count;
	hdr->io_completion = desc->pg_io_completion;
	hdr->dreq = desc->pg_dreq;
	hdr->release = release;
	hdr->completion_ops = desc->pg_completion_ops;
	if (hdr->completion_ops->init_hdr)
		hdr->completion_ops->init_hdr(hdr);

	hdr->pgio_mirror_idx = desc->pg_mirror_idx;
}
EXPORT_SYMBOL_GPL(nfs_pgheader_init);

void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
{
	unsigned int new = pos - hdr->io_start;

	if (hdr->good_bytes > new) {
		hdr->good_bytes = new;
		clear_bit(NFS_IOHDR_EOF, &hdr->flags);
		if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags))
			hdr->error = error;
	}
}

static inline struct nfs_page *
nfs_page_alloc(void)
{
	struct nfs_page *p = kmem_cache_zalloc(nfs_page_cachep, GFP_NOIO);
	if (p)
		INIT_LIST_HEAD(&p->wb_list);
	return p;
}

static inline void
nfs_page_free(struct nfs_page *p)
{
	kmem_cache_free(nfs_page_cachep, p);
}

/**
 * nfs_iocounter_wait - wait for i/o to complete
 * @l_ctx: nfs_lock_context with io_counter to use
 *
 * returns -ERESTARTSYS if interrupted by a fatal signal.
 * Otherwise returns 0 once the io_count hits 0.
 */
int
nfs_iocounter_wait(struct nfs_lock_context *l_ctx)
{
	return wait_var_event_killable(&l_ctx->io_count,
				       !atomic_read(&l_ctx->io_count));
}

/**
 * nfs_async_iocounter_wait - wait on a rpc_waitqueue for I/O
 * to complete
 * @task: the rpc_task that should wait
 * @l_ctx: nfs_lock_context with io_counter to check
 *
 * Returns true if there is outstanding I/O to wait on and the
 * task has been put to sleep.
 */
bool
nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
{
	struct inode *inode = d_inode(l_ctx->open_context->dentry);
	bool ret = false;

	if (atomic_read(&l_ctx->io_count) > 0) {
		rpc_sleep_on(&NFS_SERVER(inode)->uoc_rpcwaitq, task, NULL);
		ret = true;
	}

	if (atomic_read(&l_ctx->io_count) == 0) {
		rpc_wake_up_queued_task(&NFS_SERVER(inode)->uoc_rpcwaitq, task);
		ret = false;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);

/*
 * nfs_page_group_lock - lock the head of the page group
 * @req - request in group that is to be locked
 *
 * this lock must be held when traversing or modifying the page
 * group list
 *
 * return 0 on success, < 0 on error
 */
int
nfs_page_group_lock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
		return 0;

	set_bit(PG_CONTENDED1, &head->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
				TASK_UNINTERRUPTIBLE);
}

/*
 * nfs_page_group_unlock - unlock the head of the page group
 * @req - request in group that is to be unlocked
 */
void
nfs_page_group_unlock(struct nfs_page *req)
{
	struct nfs_page *head = req->wb_head;

	WARN_ON_ONCE(head != head->wb_head);

	smp_mb__before_atomic();
	clear_bit(PG_HEADLOCK, &head->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED1, &head->wb_flags))
		return;
	wake_up_bit(&head->wb_flags, PG_HEADLOCK);
}
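
/*
 * Editor's note (not in the original source): a minimal sketch of the
 * locking pattern the two helpers above protect. Any traversal of the
 * wb_this_page ring must sit between lock and unlock; the helper name
 * example_count_group() is hypothetical.
 *
 *	static unsigned int example_count_group(struct nfs_page *req)
 *	{
 *		struct nfs_page *tmp = req;
 *		unsigned int n = 0;
 *
 *		if (nfs_page_group_lock(req) < 0)
 *			return 0;
 *		do {
 *			n++;
 *			tmp = tmp->wb_this_page;
 *		} while (tmp != req);
 *		nfs_page_group_unlock(req);
 *		return n;
 *	}
 */
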
/*
 * nfs_page_group_sync_on_bit_locked
 *
 * must be called with page group lock held
 */
static bool
nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit)
{
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp;

	WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_flags));
	WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags));

	tmp = req->wb_this_page;
	while (tmp != req) {
		if (!test_bit(bit, &tmp->wb_flags))
			return false;
		tmp = tmp->wb_this_page;
	}

	/* true! reset all bits */
	tmp = req;
	do {
		clear_bit(bit, &tmp->wb_flags);
		tmp = tmp->wb_this_page;
	} while (tmp != req);

	return true;
}

/*
 * nfs_page_group_sync_on_bit - set bit on current request, but only
 *   return true if the bit is set for all requests in page group
 * @req - request in page group
 * @bit - PG_* bit that is used to sync page group
 */
bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
	bool ret;

	nfs_page_group_lock(req);
	ret = nfs_page_group_sync_on_bit_locked(req, bit);
	nfs_page_group_unlock(req);

	return ret;
}

/*
 * nfs_page_group_init - Initialize the page group linkage for @req
 * @req - a new nfs request
 * @prev - the previous request in page group, or NULL if @req is the first
 *         or only request in the group (the head).
 */
static inline void
nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
{
	struct inode *inode;
	WARN_ON_ONCE(prev == req);

	if (!prev) {
		/* a head request */
		req->wb_head = req;
		req->wb_this_page = req;
	} else {
		/* a subrequest */
		WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
		WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
		req->wb_head = prev->wb_head;
		req->wb_this_page = prev->wb_this_page;
		prev->wb_this_page = req;

		/* All subrequests take a ref on the head request until
		 * nfs_page_group_destroy is called */
		kref_get(&req->wb_head->wb_kref);

		/* grab extra ref and bump the request count if head request
		 * has extra ref from the write/commit path to handle handoff
		 * between write and commit lists. */
		if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
			inode = page_file_mapping(req->wb_page)->host;
			set_bit(PG_INODE_REF, &req->wb_flags);
			kref_get(&req->wb_kref);
			atomic_long_inc(&NFS_I(inode)->nrequests);
		}
	}
}

/*
 * nfs_page_group_destroy - sync the destruction of page groups
 * @req - request that no longer needs the page group
 *
 * releases the page group reference from each member once all
 * members have called this function.
 */
static void
nfs_page_group_destroy(struct kref *kref)
{
	struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
	struct nfs_page *head = req->wb_head;
	struct nfs_page *tmp, *next;

	if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
		goto out;

	tmp = req;
	do {
		next = tmp->wb_this_page;
		/* unlink and free */
		tmp->wb_this_page = tmp;
		tmp->wb_head = tmp;
		nfs_free_request(tmp);
		tmp = next;
	} while (tmp != req);

out:
	/* subrequests must release the ref on the head request */
	if (head != req)
		nfs_release_request(head);
}

/**
 * nfs_create_request - Create an NFS read/write request.
 * @ctx: open context to use
 * @page: page to write
 * @last: last nfs request created for this page group or NULL if head
 * @offset: starting offset within the page for the write
 * @count: number of bytes to read/write
 *
 * The page must be locked by the caller. This makes sure we never
 * create two different requests for the same page.
 * User should ensure it is safe to sleep in this function.
 */
struct nfs_page *
nfs_create_request(struct nfs_open_context *ctx, struct page *page,
		   struct nfs_page *last, unsigned int offset,
		   unsigned int count)
{
	struct nfs_page *req;
	struct nfs_lock_context *l_ctx;

	if (test_bit(NFS_CONTEXT_BAD, &ctx->flags))
		return ERR_PTR(-EBADF);
	/* try to allocate the request struct */
	req = nfs_page_alloc();
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	/* get lock context early so we can deal with alloc failures */
	l_ctx = nfs_get_lock_context(ctx);
	if (IS_ERR(l_ctx)) {
		nfs_page_free(req);
		return ERR_CAST(l_ctx);
	}
	req->wb_lock_context = l_ctx;
	atomic_inc(&l_ctx->io_count);

	/* Initialize the request struct. Initially, we assume a
	 * long write-back delay. This will be adjusted in
	 * update_nfs_request below if the region is not locked. */
	req->wb_page    = page;
	if (page) {
		req->wb_index = page_index(page);
		get_page(page);
	}
	req->wb_offset  = offset;
	req->wb_pgbase  = offset;
	req->wb_bytes   = count;
	req->wb_context = get_nfs_open_context(ctx);
	kref_init(&req->wb_kref);
	nfs_page_group_init(req, last);
	return req;
}
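
/*
 * Editor's note (not in the original source): a minimal sketch of the
 * request lifecycle this helper begins. The locked page and open context
 * are assumed to be supplied by the caller:
 *
 *	req = nfs_create_request(ctx, page, NULL, 0, PAGE_SIZE);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	nfs_lock_request(req);
 *	... hand req to a pageio descriptor ...
 *	nfs_unlock_and_release_request(req);
 */
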
/**
 * nfs_unlock_request - Unlock request and wake up sleepers.
 * @req: request to unlock
 */
void nfs_unlock_request(struct nfs_page *req)
{
	if (!NFS_WBACK_BUSY(req)) {
		printk(KERN_ERR "NFS: Invalid unlock attempted\n");
		BUG();
	}
	smp_mb__before_atomic();
	clear_bit(PG_BUSY, &req->wb_flags);
	smp_mb__after_atomic();
	if (!test_bit(PG_CONTENDED2, &req->wb_flags))
		return;
	wake_up_bit(&req->wb_flags, PG_BUSY);
}

/**
 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
 * @req: request to unlock and release
 */
void nfs_unlock_and_release_request(struct nfs_page *req)
{
	nfs_unlock_request(req);
	nfs_release_request(req);
}

/*
 * nfs_clear_request - Free up all resources allocated to the request
 * @req: request to clear
 *
 * Release page and open context resources associated with a read/write
 * request after it has completed.
 */
static void nfs_clear_request(struct nfs_page *req)
{
	struct page *page = req->wb_page;
	struct nfs_open_context *ctx = req->wb_context;
	struct nfs_lock_context *l_ctx = req->wb_lock_context;

	if (page != NULL) {
		put_page(page);
		req->wb_page = NULL;
	}
	if (l_ctx != NULL) {
		if (atomic_dec_and_test(&l_ctx->io_count)) {
			wake_up_var(&l_ctx->io_count);
			if (test_bit(NFS_CONTEXT_UNLOCK, &ctx->flags))
				rpc_wake_up(&NFS_SERVER(d_inode(ctx->dentry))->uoc_rpcwaitq);
		}
		nfs_put_lock_context(l_ctx);
		req->wb_lock_context = NULL;
	}
	if (ctx != NULL) {
		put_nfs_open_context(ctx);
		req->wb_context = NULL;
	}
}

/**
 * nfs_free_request - Free all resources held by an NFS read/write request
 * @req: request to free
 *
 * Note: Should never be called with the spinlock held!
 */
void nfs_free_request(struct nfs_page *req)
{
	WARN_ON_ONCE(req->wb_this_page != req);

	/* extra debug: make sure no sync bits are still set */
	WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags));
	WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags));

	/* Release struct file and open context */
	nfs_clear_request(req);
	nfs_page_free(req);
}

void nfs_release_request(struct nfs_page *req)
{
	kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Interruptible by fatal signals only.
 * The user is responsible for holding a count on the request.
 */
int
nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	set_bit(PG_CONTENDED2, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(nfs_wait_on_request);

/*
 * nfs_generic_pg_test - determine if requests can be coalesced
 * @desc: pointer to descriptor
 * @prev: previous request in desc, or NULL
 * @req: this request
 *
 * Returns zero if @req cannot be coalesced into @desc, otherwise it returns
 * the size of the request.
 */
size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *prev, struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (mirror->pg_count > mirror->pg_bsize) {
		/* should never happen */
		WARN_ON_ONCE(1);
		return 0;
	}

	/*
	 * Limit the request size so that we can still allocate a page array
	 * for it without upsetting the slab allocator.
	 */
	if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) *
			sizeof(struct page *) > PAGE_SIZE)
		return 0;

	return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes);
}
EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
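
/*
 * Editor's note (not in the original source): a worked example of the
 * limit above, assuming 4 KiB pages and 8-byte pointers. The page array
 * must fit in a single page, so at most PAGE_SIZE / sizeof(struct page *)
 * = 4096 / 8 = 512 page pointers are allowed; coalescing stops once
 * pg_count + wb_bytes would span more than 512 pages (2 MiB of data).
 */
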
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
{
	struct nfs_pgio_header *hdr = ops->rw_alloc_header();

	if (hdr) {
		INIT_LIST_HEAD(&hdr->pages);
		hdr->rw_ops = ops;
	}
	return hdr;
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);

/**
 * nfs_pgio_data_destroy - make @hdr suitable for reuse
 * @hdr: A header that has had nfs_generic_pgio called
 *
 * Frees memory and releases refs from nfs_generic_pgio, so that it may
 * be called again.
 */
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
	if (hdr->args.context)
		put_nfs_open_context(hdr->args.context);
	if (hdr->page_array.pagevec != hdr->page_array.page_array)
		kfree(hdr->page_array.pagevec);
}

/*
 * nfs_pgio_header_free - Free a read or write header
 * @hdr: The header to free
 */
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
	nfs_pgio_data_destroy(hdr);
	hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);

/**
 * nfs_pgio_rpcsetup - Set up arguments for a pageio call
 * @hdr: The pageio hdr
 * @count: Number of bytes to read
 * @how: How to commit data (writes only)
 * @cinfo: Commit information for the call (writes only)
 */
static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
			      unsigned int count,
			      int how, struct nfs_commit_info *cinfo)
{
	struct nfs_page *req = hdr->req;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with hdr->commit et al. */

	hdr->args.fh     = NFS_FH(hdr->inode);
	hdr->args.offset = req_offset(req);
	/* pnfs_set_layoutcommit needs this */
	hdr->mds_offset = hdr->args.offset;
	hdr->args.pgbase = req->wb_pgbase;
	hdr->args.pages  = hdr->page_array.pagevec;
	hdr->args.count  = count;
	hdr->args.context = get_nfs_open_context(req->wb_context);
	hdr->args.lock_context = req->wb_lock_context;
	hdr->args.stable  = NFS_UNSTABLE;
	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
	case 0:
		break;
	case FLUSH_COND_STABLE:
		if (nfs_reqs_to_commit(cinfo))
			break;
		/* fall through */
	default:
		hdr->args.stable = NFS_FILE_SYNC;
	}

	hdr->res.fattr   = &hdr->fattr;
	hdr->res.count   = count;
	hdr->res.eof     = 0;
	hdr->res.verf    = &hdr->verf;
	nfs_fattr_init(&hdr->fattr);
}

/**
 * nfs_pgio_prepare - Prepare pageio hdr to go over the wire
 * @task: The current task
 * @calldata: pageio header to prepare
 */
static void nfs_pgio_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	int err;
	err = NFS_PROTO(hdr->inode)->pgio_rpc_prepare(task, hdr);
	if (err)
		rpc_exit(task, err);
}

int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
		      struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
		      const struct rpc_call_ops *call_ops, int how, int flags)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp = &hdr->args,
		.rpc_resp = &hdr->res,
		.rpc_cred = cred,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.task = &hdr->task,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = hdr,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
	};
	int ret = 0;

	hdr->rw_ops->rw_initiate(hdr, &msg, rpc_ops, &task_setup_data, how);

	dprintk("NFS: initiated pgio call "
		"(req %s/%llu, %u bytes @ offset %llu)\n",
		hdr->inode->i_sb->s_id,
		(unsigned long long)NFS_FILEID(hdr->inode),
		hdr->args.count,
		(unsigned long long)hdr->args.offset);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task)) {
		ret = PTR_ERR(task);
		goto out;
	}
	if (how & FLUSH_SYNC) {
		ret = rpc_wait_for_completion_task(task);
		if (ret == 0)
			ret = task->tk_status;
	}
	rpc_put_task(task);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_initiate_pgio);

/**
 * nfs_pgio_error - Clean up from a pageio error
 * @hdr: pageio header
 */
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
	set_bit(NFS_IOHDR_REDO, &hdr->flags);
	hdr->completion_ops->completion(hdr);
}

/**
 * nfs_pgio_release - Release pageio data
 * @calldata: The pageio header to release
 */
static void nfs_pgio_release(void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	hdr->completion_ops->completion(hdr);
}

static void nfs_pageio_mirror_init(struct nfs_pgio_mirror *mirror,
				   unsigned int bsize)
{
	INIT_LIST_HEAD(&mirror->pg_list);
	mirror->pg_bytes_written = 0;
	mirror->pg_count = 0;
	mirror->pg_bsize = bsize;
	mirror->pg_base = 0;
	mirror->pg_recoalesce = 0;
}

/**
 * nfs_pageio_init - initialise a page io descriptor
 * @desc: pointer to descriptor
 * @inode: pointer to inode
 * @pg_ops: pointer to pageio operations
 * @compl_ops: pointer to pageio completion operations
 * @rw_ops: pointer to nfs read/write operations
 * @bsize: io block size
 * @io_flags: extra parameters for the io function
 */
void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
		     struct inode *inode,
		     const struct nfs_pageio_ops *pg_ops,
		     const struct nfs_pgio_completion_ops *compl_ops,
		     const struct nfs_rw_ops *rw_ops,
		     size_t bsize,
		     int io_flags)
{
	desc->pg_moreio = 0;
	desc->pg_inode = inode;
	desc->pg_ops = pg_ops;
	desc->pg_completion_ops = compl_ops;
	desc->pg_rw_ops = rw_ops;
	desc->pg_ioflags = io_flags;
	desc->pg_error = 0;
	desc->pg_lseg = NULL;
	desc->pg_io_completion = NULL;
	desc->pg_dreq = NULL;
	desc->pg_bsize = bsize;
	desc->pg_mirror_count = 1;
	desc->pg_mirror_idx = 0;
	desc->pg_mirrors_dynamic = NULL;
	desc->pg_mirrors = desc->pg_mirrors_static;
	nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
}
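
/*
 * Editor's note (not in the original source): a minimal sketch of how a
 * caller typically drives a descriptor, assuming the ops tables and a set
 * of locked requests are already in hand (in-tree callers normally go
 * through wrappers such as nfs_pageio_init_read()/nfs_pageio_init_write()):
 *
 *	struct nfs_pageio_descriptor pgio;
 *
 *	nfs_pageio_init(&pgio, inode, pg_ops, compl_ops, rw_ops,
 *			NFS_SERVER(inode)->rsize, 0);
 *	for each locked request req:
 *		if (!nfs_pageio_add_request(&pgio, req))
 *			break;	... pgio.pg_error holds the reason ...
 *	nfs_pageio_complete(&pgio);
 */
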
/**
 * nfs_pgio_result - Basic pageio error handling
 * @task: The task that ran
 * @calldata: Pageio header to check
 */
static void nfs_pgio_result(struct rpc_task *task, void *calldata)
{
	struct nfs_pgio_header *hdr = calldata;
	struct inode *inode = hdr->inode;

	dprintk("NFS: %s: %5u, (status %d)\n", __func__,
		task->tk_pid, task->tk_status);

	if (hdr->rw_ops->rw_done(task, hdr, inode) != 0)
		return;
	if (task->tk_status < 0)
		nfs_set_pgio_error(hdr, task->tk_status, hdr->args.offset);
	else
		hdr->rw_ops->rw_result(task, hdr);
}

/*
 * Create an RPC task for the given read or write request and kick it.
 * The page must have been locked by the caller.
 *
 * It may happen that the page we're passed is not marked dirty.
 * This is the case if nfs_updatepage detects a conflicting request
 * that has been written but not committed.
 */
int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
		     struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page		*req;
	struct page		**pages,
				*last_page;
	struct list_head *head = &mirror->pg_list;
	struct nfs_commit_info cinfo;
	struct nfs_page_array *pg_array = &hdr->page_array;
	unsigned int pagecount, pageused;
	gfp_t gfp_flags = GFP_KERNEL;

	pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
	pg_array->npages = pagecount;

	if (pagecount <= ARRAY_SIZE(pg_array->page_array))
		pg_array->pagevec = pg_array->page_array;
	else {
		if (hdr->rw_mode == FMODE_WRITE)
			gfp_flags = GFP_NOIO;
		pg_array->pagevec = kcalloc(pagecount, sizeof(struct page *), gfp_flags);
		if (!pg_array->pagevec) {
			pg_array->npages = 0;
			nfs_pgio_error(hdr);
			desc->pg_error = -ENOMEM;
			return desc->pg_error;
		}
	}

	nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
	pages = hdr->page_array.pagevec;
	last_page = NULL;
	pageused = 0;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &hdr->pages);

		if (!last_page || last_page != req->wb_page) {
			pageused++;
			if (pageused > pagecount)
				break;
			*pages++ = last_page = req->wb_page;
		}
	}
	if (WARN_ON_ONCE(pageused != pagecount)) {
		nfs_pgio_error(hdr);
		desc->pg_error = -EINVAL;
		return desc->pg_error;
	}

	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
	    (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
		desc->pg_ioflags &= ~FLUSH_COND_STABLE;

	/* Set up the argument struct */
	nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
	desc->pg_rpc_callops = &nfs_pgio_common_ops;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_generic_pgio);
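
/*
 * Editor's note (not in the original source): a worked example of the
 * page accounting above, assuming 4 KiB pages. With pg_base = 512 and
 * pg_count = 8192, the I/O covers bytes 512..8703 of the page run, so
 * nfs_page_array_len() yields DIV_ROUND_UP(512 + 8192, 4096) = 3 pages;
 * the loop then stores each distinct req->wb_page exactly once and WARNs
 * if the final count disagrees with that estimate.
 */
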
static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, nfs_pgio_header_free);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret == 0)
		ret = nfs_initiate_pgio(NFS_CLIENT(hdr->inode),
					hdr,
					hdr->cred,
					NFS_PROTO(hdr->inode),
					desc->pg_rpc_callops,
					desc->pg_ioflags, 0);
	return ret;
}

static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
			 unsigned int mirror_count)
{
	struct nfs_pgio_mirror *ret;
	unsigned int i;

	kfree(desc->pg_mirrors_dynamic);
	desc->pg_mirrors_dynamic = NULL;
	if (mirror_count == 1)
		return desc->pg_mirrors_static;
	ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_NOFS);
	if (ret != NULL) {
		for (i = 0; i < mirror_count; i++)
			nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
		desc->pg_mirrors_dynamic = ret;
	}
	return ret;
}

/*
 * nfs_pageio_setup_mirroring - determine if mirroring is to be used
 *				by calling the pg_get_mirror_count op
 */
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
				       struct nfs_page *req)
{
	unsigned int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
	if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
		return;

	if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
		pgio->pg_error = -EINVAL;
		return;
	}

	pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
	if (pgio->pg_mirrors == NULL) {
		pgio->pg_error = -ENOMEM;
		pgio->pg_mirrors = pgio->pg_mirrors_static;
		mirror_count = 1;
	}
	pgio->pg_mirror_count = mirror_count;
}

/*
 * nfs_pageio_stop_mirroring - stop using mirroring (set mirror count to 1)
 */
void nfs_pageio_stop_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
}

static void nfs_pageio_cleanup_mirroring(struct nfs_pageio_descriptor *pgio)
{
	pgio->pg_mirror_count = 1;
	pgio->pg_mirror_idx = 0;
	pgio->pg_mirrors = pgio->pg_mirrors_static;
	kfree(pgio->pg_mirrors_dynamic);
	pgio->pg_mirrors_dynamic = NULL;
}

static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
		const struct nfs_lock_context *l2)
{
	return l1->lockowner == l2->lockowner;
}

/**
 * nfs_can_coalesce_requests - test two requests for compatibility
 * @prev: pointer to nfs_page
 * @req: pointer to nfs_page
 * @pgio: pointer to nfs_pageio_descriptor
 *
 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
 * page data area they describe is contiguous, and that their RPC
 * credentials, NFSv4 open state, and lockowners are the same.
 *
 * Return 'true' if this is the case, else return 'false'.
 */
static bool nfs_can_coalesce_requests(struct nfs_page *prev,
				      struct nfs_page *req,
				      struct nfs_pageio_descriptor *pgio)
{
	size_t size;
	struct file_lock_context *flctx;

	if (prev) {
		if (!nfs_match_open_context(req->wb_context, prev->wb_context))
			return false;
		flctx = d_inode(req->wb_context->dentry)->i_flctx;
		if (flctx != NULL &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock)) &&
		    !nfs_match_lock_context(req->wb_lock_context,
					    prev->wb_lock_context))
			return false;
		if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
			return false;
		if (req->wb_page == prev->wb_page) {
			if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
				return false;
		} else {
			if (req->wb_pgbase != 0 ||
			    prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
				return false;
		}
	}
	size = pgio->pg_ops->pg_test(pgio, prev, req);
	WARN_ON_ONCE(size > req->wb_bytes);
	if (size && size < req->wb_bytes)
		req->wb_bytes = size;
	return size > 0;
}

/**
 * nfs_pageio_do_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc,
				     struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *prev = NULL;

	if (mirror->pg_count != 0) {
		prev = nfs_list_entry(mirror->pg_list.prev);
	} else {
		if (desc->pg_ops->pg_init)
			desc->pg_ops->pg_init(desc, req);
		if (desc->pg_error < 0)
			return 0;
		mirror->pg_base = req->wb_pgbase;
	}
	if (!nfs_can_coalesce_requests(prev, req, desc))
		return 0;
	nfs_list_remove_request(req);
	nfs_list_add_request(req, &mirror->pg_list);
	mirror->pg_count += req->wb_bytes;
	return 1;
}

/*
 * Helper for nfs_pageio_add_request and nfs_pageio_complete
 */
static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!list_empty(&mirror->pg_list)) {
		int error = desc->pg_ops->pg_doio(desc);
		if (error < 0)
			desc->pg_error = error;
		else
			mirror->pg_bytes_written += mirror->pg_count;
	}
	if (list_empty(&mirror->pg_list)) {
		mirror->pg_count = 0;
		mirror->pg_base = 0;
	}
}

/**
 * __nfs_pageio_add_request - Attempt to coalesce a request into a page list.
 * @desc: destination io descriptor
 * @req: request
 *
 * This may split a request into subrequests which are all part of the
 * same page group.
 *
 * Returns true if the request 'req' was successfully coalesced into the
 * existing list of pages 'desc'.
 */
static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
				    struct nfs_page *req)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	struct nfs_page *subreq;
	unsigned int bytes_left = 0;
	unsigned int offset, pgbase;

	nfs_page_group_lock(req);

	subreq = req;
	bytes_left = subreq->wb_bytes;
	offset = subreq->wb_offset;
	pgbase = subreq->wb_pgbase;

	do {
		if (!nfs_pageio_do_add_request(desc, subreq)) {
			/* make sure pg_test call(s) did nothing */
			WARN_ON_ONCE(subreq->wb_bytes != bytes_left);
			WARN_ON_ONCE(subreq->wb_offset != offset);
			WARN_ON_ONCE(subreq->wb_pgbase != pgbase);

			nfs_page_group_unlock(req);
			desc->pg_moreio = 1;
			nfs_pageio_doio(desc);
			if (desc->pg_error < 0)
				return 0;
			if (mirror->pg_recoalesce)
				return 0;
			/* retry add_request for this subreq */
			nfs_page_group_lock(req);
			continue;
		}

		/* check for buggy pg_test call(s) */
		WARN_ON_ONCE(subreq->wb_bytes + subreq->wb_pgbase > PAGE_SIZE);
		WARN_ON_ONCE(subreq->wb_bytes > bytes_left);
		WARN_ON_ONCE(subreq->wb_bytes == 0);

		bytes_left -= subreq->wb_bytes;
		offset += subreq->wb_bytes;
		pgbase += subreq->wb_bytes;

		if (bytes_left) {
			subreq = nfs_create_request(req->wb_context,
					req->wb_page,
					subreq, pgbase, bytes_left);
			if (IS_ERR(subreq))
				goto err_ptr;
			nfs_lock_request(subreq);
			subreq->wb_offset = offset;
			subreq->wb_index = req->wb_index;
		}
	} while (bytes_left > 0);

	nfs_page_group_unlock(req);
	return 1;
err_ptr:
	desc->pg_error = PTR_ERR(subreq);
	nfs_page_group_unlock(req);
	return 0;
}

static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);
	LIST_HEAD(head);

	do {
		list_splice_init(&mirror->pg_list, &head);
		mirror->pg_bytes_written -= mirror->pg_count;
		mirror->pg_count = 0;
		mirror->pg_base = 0;
		mirror->pg_recoalesce = 0;

		while (!list_empty(&head)) {
			struct nfs_page *req;

			req = list_first_entry(&head, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (__nfs_pageio_add_request(desc, req))
				continue;
			if (desc->pg_error < 0) {
				list_splice_tail(&head, &mirror->pg_list);
				mirror->pg_recoalesce = 1;
				return 0;
			}
			break;
		}
	} while (mirror->pg_recoalesce);
	return 1;
}

static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc,
		struct nfs_page *req)
{
	int ret;

	do {
		ret = __nfs_pageio_add_request(desc, req);
		if (ret)
			break;
		if (desc->pg_error < 0)
			break;
		ret = nfs_do_recoalesce(desc);
	} while (ret);

	return ret;
}

static void nfs_pageio_error_cleanup(struct nfs_pageio_descriptor *desc)
{
	u32 midx;
	struct nfs_pgio_mirror *mirror;

	if (!desc->pg_error)
		return;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		desc->pg_completion_ops->error_cleanup(&mirror->pg_list);
	}
}

int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
			   struct nfs_page *req)
{
	u32 midx;
	unsigned int pgbase, offset, bytes;
	struct nfs_page *dupreq, *lastreq;

	pgbase = req->wb_pgbase;
	offset = req->wb_offset;
	bytes = req->wb_bytes;

	nfs_pageio_setup_mirroring(desc, req);
	if (desc->pg_error < 0)
		goto out_failed;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		if (midx) {
			nfs_page_group_lock(req);

			/* find the last request */
			for (lastreq = req->wb_head;
			     lastreq->wb_this_page != req->wb_head;
			     lastreq = lastreq->wb_this_page)
				;

			dupreq = nfs_create_request(req->wb_context,
					req->wb_page, lastreq, pgbase, bytes);

			if (IS_ERR(dupreq)) {
				nfs_page_group_unlock(req);
				desc->pg_error = PTR_ERR(dupreq);
				goto out_failed;
			}

			nfs_lock_request(dupreq);
			nfs_page_group_unlock(req);
			dupreq->wb_offset = offset;
			dupreq->wb_index = req->wb_index;
		} else
			dupreq = req;

		if (nfs_pgio_has_mirroring(desc))
			desc->pg_mirror_idx = midx;
		if (!nfs_pageio_add_request_mirror(desc, dupreq))
			goto out_failed;
	}

	return 1;

out_failed:
	nfs_pageio_error_cleanup(desc);
	return 0;
}
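
/*
 * Editor's note (not in the original source): with pg_mirror_count == 2
 * (e.g. a pNFS flexfiles layout mirrored across two data servers), the
 * loop above queues the original req on mirror 0 and a duplicate created
 * by nfs_create_request() on mirror 1, so each mirror carries a request
 * covering the same pgbase/offset/bytes range of the same page.
 */
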
/*
 * nfs_pageio_complete_mirror - Complete I/O on the current mirror of an
 *				nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 * @mirror_idx: index of mirror to complete
 */
static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
				       u32 mirror_idx)
{
	struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx];
	u32 restore_idx = desc->pg_mirror_idx;

	if (nfs_pgio_has_mirroring(desc))
		desc->pg_mirror_idx = mirror_idx;
	for (;;) {
		nfs_pageio_doio(desc);
		if (!mirror->pg_recoalesce)
			break;
		if (!nfs_do_recoalesce(desc))
			break;
	}
	desc->pg_mirror_idx = restore_idx;
}

/*
 * nfs_pageio_resend - Transfer requests to new descriptor and resend
 * @hdr - the pgio header to move request from
 * @desc - the pageio descriptor to add requests to
 *
 * Try to move each request (nfs_page) from @hdr to @desc then attempt
 * to send them.
 *
 * Returns 0 on success and < 0 on error.
 */
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	LIST_HEAD(failed);

	desc->pg_io_completion = hdr->io_completion;
	desc->pg_dreq = hdr->dreq;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(desc, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(desc);
	if (!list_empty(&failed)) {
		list_move(&failed, &hdr->pages);
		return desc->pg_error < 0 ? desc->pg_error : -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_pageio_resend);

/**
 * nfs_pageio_complete - Complete I/O then cleanup an nfs_pageio_descriptor
 * @desc: pointer to io descriptor
 */
void nfs_pageio_complete(struct nfs_pageio_descriptor *desc)
{
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++)
		nfs_pageio_complete_mirror(desc, midx);

	if (desc->pg_error < 0)
		nfs_pageio_error_cleanup(desc);
	if (desc->pg_ops->pg_cleanup)
		desc->pg_ops->pg_cleanup(desc);
	nfs_pageio_cleanup_mirroring(desc);
}

/**
 * nfs_pageio_cond_complete - Conditional I/O completion
 * @desc: pointer to io descriptor
 * @index: page index
 *
 * It is important to ensure that processes don't try to take locks
 * on non-contiguous ranges of pages as that might deadlock. This
 * function should be called before attempting to wait on a locked
 * nfs_page. It will complete the I/O if the page index 'index'
 * is not contiguous with the existing list of pages in 'desc'.
 */
void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
{
	struct nfs_pgio_mirror *mirror;
	struct nfs_page *prev;
	u32 midx;

	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
		mirror = &desc->pg_mirrors[midx];
		if (!list_empty(&mirror->pg_list)) {
			prev = nfs_list_entry(mirror->pg_list.prev);
			if (index != prev->wb_index + 1) {
				nfs_pageio_complete(desc);
				break;
			}
		}
	}
}

int __init nfs_init_nfspagecache(void)
{
	nfs_page_cachep = kmem_cache_create("nfs_page",
					    sizeof(struct nfs_page),
					    0, SLAB_HWCACHE_ALIGN,
					    NULL);
	if (nfs_page_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_nfspagecache(void)
{
	kmem_cache_destroy(nfs_page_cachep);
}

static const struct rpc_call_ops nfs_pgio_common_ops = {
	.rpc_call_prepare = nfs_pgio_prepare,
	.rpc_call_done = nfs_pgio_result,
	.rpc_release = nfs_pgio_release,
};

const struct nfs_pageio_ops nfs_pgio_rw_ops = {
	.pg_test		= nfs_generic_pg_test,
	.pg_doio		= nfs_generic_pg_pgios,
};