/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static const struct nfs_pageio_ops nfs_pageio_read_ops;
static const struct rpc_call_ops nfs_read_common_ops;
static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;

static struct kmem_cache *nfs_rdata_cachep;

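/*
 * Allocate a zeroed read header from the nfs_rdata_cachep slab and
 * initialise its page and RPC request lists.  The header's refcount
 * starts at zero; each nfs_pgio_data attached to it takes a reference.
 */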
struct nfs_rw_header *nfs_readhdr_alloc(void)
{
        struct nfs_rw_header *rhdr;

        rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
        if (rhdr) {
                struct nfs_pgio_header *hdr = &rhdr->header;

                INIT_LIST_HEAD(&hdr->pages);
                INIT_LIST_HEAD(&hdr->rpc_list);
                spin_lock_init(&hdr->lock);
                atomic_set(&hdr->refcnt, 0);
        }
        return rhdr;
}
EXPORT_SYMBOL_GPL(nfs_readhdr_alloc);

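/*
 * Allocate an nfs_pgio_data structure able to hold @pagecount pages.
 * The first allocation for a header reuses the rpc_data slot embedded in
 * the surrounding nfs_rw_header; further allocations fall back to
 * kzalloc().
 */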
static struct nfs_pgio_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
                                                unsigned int pagecount)
{
        struct nfs_pgio_data *data, *prealloc;

        prealloc = &container_of(hdr, struct nfs_rw_header, header)->rpc_data;
        if (prealloc->header == NULL)
                data = prealloc;
        else
                data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                goto out;

        if (nfs_pgarray_set(&data->pages, pagecount)) {
                data->header = hdr;
                atomic_inc(&hdr->refcnt);
        } else {
                if (data != prealloc)
                        kfree(data);
                data = NULL;
        }
out:
        return data;
}

void nfs_readhdr_free(struct nfs_pgio_header *hdr)
{
        struct nfs_rw_header *rhdr = container_of(hdr, struct nfs_rw_header, header);

        kmem_cache_free(nfs_rdata_cachep, rhdr);
}
EXPORT_SYMBOL_GPL(nfs_readhdr_free);

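/*
 * Release one nfs_pgio_data: drop its open context and page vector, and
 * drop its reference on the header, running the completion callback when
 * the last reference goes away.  The preallocated rpc_data slot is not
 * kfree()d here; it is recycled together with the header.
 */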
void nfs_readdata_release(struct nfs_pgio_data *rdata)
{
        struct nfs_pgio_header *hdr = rdata->header;
        struct nfs_rw_header *read_header = container_of(hdr, struct nfs_rw_header, header);

        put_nfs_open_context(rdata->args.context);
        if (rdata->pages.pagevec != rdata->pages.page_array)
                kfree(rdata->pages.pagevec);
        if (rdata == &read_header->rpc_data) {
                rdata->header = NULL;
                rdata = NULL;
        }
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
        /* Note: we only free the rpc_task after callbacks are done.
         * See the comment in rpc_free_task() for why
         */
        kfree(rdata);
}
EXPORT_SYMBOL_GPL(nfs_readdata_release);

static
int nfs_return_empty_page(struct page *page)
{
        zero_user(page, 0, PAGE_CACHE_SIZE);
        SetPageUptodate(page);
        unlock_page(page);
        return 0;
}

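/*
 * Initialise a pageio descriptor for reads.  With NFSv4.1 a pNFS layout
 * driver may supply its own pg_read_ops; otherwise (or when force_mds is
 * set) I/O goes through the MDS using the generic read ops and the
 * server's rsize.
 */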
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
                          struct inode *inode, bool force_mds,
                          const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        const struct nfs_pageio_ops *pg_ops = &nfs_pageio_read_ops;

#ifdef CONFIG_NFS_V4_1
        if (server->pnfs_curr_ld && !force_mds)
                pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
        nfs_pageio_init(pgio, inode, pg_ops, compl_ops, server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
        pgio->pg_ops = &nfs_pageio_read_ops;
        pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

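/*
 * Read a single page asynchronously: build an nfs_page request for the
 * valid part of the page, zero the remainder, and push the request
 * through a freshly initialised pageio descriptor.
 */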
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
                       struct page *page)
{
        struct nfs_page *new;
        unsigned int len;
        struct nfs_pageio_descriptor pgio;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);
        new = nfs_create_request(ctx, inode, page, 0, len);
        if (IS_ERR(new)) {
                unlock_page(page);
                return PTR_ERR(new);
        }
        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);

        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);
        nfs_pageio_add_request(&pgio, new);
        nfs_pageio_complete(&pgio);
        NFS_I(inode)->read_io += pgio.pg_bytes_written;
        return 0;
}

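/*
 * A request is finished with: store the page in fscache if it came back
 * uptodate, unlock it, and drop the nfs_page reference.
 */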
static void nfs_readpage_release(struct nfs_page *req)
{
        struct inode *d_inode = req->wb_context->dentry->d_inode;

        if (PageUptodate(req->wb_page))
                nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

        unlock_page(req->wb_page);

        dprintk("NFS: read done (%s/%Lu %d@%Ld)\n",
                req->wb_context->dentry->d_inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
                req->wb_bytes,
                (long long)req_offset(req));
        nfs_release_request(req);
}

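/*
 * Completion callback for a whole read header: walk the request list,
 * zero whatever lies beyond hdr->good_bytes when EOF was seen, mark each
 * page uptodate unless an error truncated the good data before it, and
 * release every request.
 */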
/* Note io was page aligned */
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
        unsigned long bytes = 0;

        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
        while (!list_empty(&hdr->pages)) {
                struct nfs_page *req = nfs_list_entry(hdr->pages.next);
                struct page *page = req->wb_page;

                if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
                        if (bytes > hdr->good_bytes)
                                zero_user(page, 0, PAGE_SIZE);
                        else if (hdr->good_bytes - bytes < PAGE_SIZE)
                                zero_user_segment(page,
                                        hdr->good_bytes & ~PAGE_MASK,
                                        PAGE_SIZE);
                }
                bytes += req->wb_bytes;
                if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
                        if (bytes <= hdr->good_bytes)
                                SetPageUptodate(page);
                } else
                        SetPageUptodate(page);
                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
out:
        hdr->release(hdr);
}

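/*
 * Kick off an asynchronous READ RPC for one nfs_pgio_data.  The protocol
 * specific read_setup() fills in the rpc_message before the task is run;
 * the task reference is dropped right away because completion is handled
 * through the rpc_call_ops callbacks.
 */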
int nfs_initiate_read(struct rpc_clnt *clnt,
                      struct nfs_pgio_data *data,
                      const struct rpc_call_ops *call_ops, int flags)
{
        struct inode *inode = data->header->inode;
        int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
        struct rpc_task *task;
        struct rpc_message msg = {
                .rpc_argp = &data->args,
                .rpc_resp = &data->res,
                .rpc_cred = data->header->cred,
        };
        struct rpc_task_setup task_setup_data = {
                .task = &data->task,
                .rpc_client = clnt,
                .rpc_message = &msg,
                .callback_ops = call_ops,
                .callback_data = data,
                .workqueue = nfsiod_workqueue,
                .flags = RPC_TASK_ASYNC | swap_flags | flags,
        };

        /* Set up the initial task struct. */
        NFS_PROTO(inode)->read_setup(data, &msg);

        dprintk("NFS: %5u initiated read call (req %s/%llu, %u bytes @ "
                "offset %llu)\n",
                data->task.tk_pid,
                inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(inode),
                data->args.count,
                (unsigned long long)data->args.offset);

        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);
        return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_read);

/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_pgio_data *data,
                              unsigned int count, unsigned int offset)
{
        struct nfs_page *req = data->header->req;

        data->args.fh = NFS_FH(data->header->inode);
        data->args.offset = req_offset(req) + offset;
        data->args.pgbase = req->wb_pgbase + offset;
        data->args.pages = data->pages.pagevec;
        data->args.count = count;
        data->args.context = get_nfs_open_context(req->wb_context);
        data->args.lock_context = req->wb_lock_context;

        data->res.fattr = &data->fattr;
        data->res.count = count;
        data->res.eof = 0;
        nfs_fattr_init(&data->fattr);
}

static int nfs_do_read(struct nfs_pgio_data *data,
                       const struct rpc_call_ops *call_ops)
{
        struct inode *inode = data->header->inode;

        return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0);
}

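/*
 * Issue every nfs_pgio_data queued on @head.  All requests are sent even
 * if one of them fails; the first error encountered is the one returned
 * to the caller.
 */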
static int
nfs_do_multiple_reads(struct list_head *head,
                      const struct rpc_call_ops *call_ops)
{
        struct nfs_pgio_data *data;
        int ret = 0;

        while (!list_empty(head)) {
                int ret2;

                data = list_first_entry(head, struct nfs_pgio_data, list);
                list_del_init(&data->list);

                ret2 = nfs_do_read(data, call_ops);
                if (ret == 0)
                        ret = ret2;
        }
        return ret;
}

static void
nfs_async_read_error(struct list_head *head)
{
        struct nfs_page *req;

        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_readpage_release(req);
        }
}

static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
        .error_cleanup = nfs_async_read_error,
        .completion = nfs_read_completion,
};

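/*
 * Back out of request generation after an allocation failure: mark the
 * header for redo, free any nfs_pgio_data already queued on the rpc_list,
 * and let the completion ops clean up the remaining page requests.
 */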
static void nfs_pagein_error(struct nfs_pageio_descriptor *desc,
                             struct nfs_pgio_header *hdr)
{
        set_bit(NFS_IOHDR_REDO, &hdr->flags);
        while (!list_empty(&hdr->rpc_list)) {
                struct nfs_pgio_data *data = list_first_entry(&hdr->rpc_list,
                                struct nfs_pgio_data, list);
                list_del(&data->list);
                nfs_readdata_release(data);
        }
        desc->pg_completion_ops->error_cleanup(&desc->pg_list);
}

/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
                            struct nfs_pgio_header *hdr)
{
        struct nfs_page *req = hdr->req;
        struct page *page = req->wb_page;
        struct nfs_pgio_data *data;
        size_t rsize = desc->pg_bsize, nbytes;
        unsigned int offset;

        offset = 0;
        nbytes = desc->pg_count;
        do {
                size_t len = min(nbytes, rsize);

                data = nfs_readdata_alloc(hdr, 1);
                if (!data) {
                        nfs_pagein_error(desc, hdr);
                        return -ENOMEM;
                }
                data->pages.pagevec[0] = page;
                nfs_read_rpcsetup(data, len, offset);
                list_add(&data->list, &hdr->rpc_list);
                nbytes -= len;
                offset += len;
        } while (nbytes != 0);

        nfs_list_remove_request(req);
        nfs_list_add_request(req, &hdr->pages);
        desc->pg_rpc_callops = &nfs_read_common_ops;
        return 0;
}

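/*
 * Coalesce the whole pg_list into a single READ: one nfs_pgio_data holds
 * the page vector for every queued request.  Used when the block size
 * covers at least a full page.
 */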
static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
                          struct nfs_pgio_header *hdr)
{
        struct nfs_page *req;
        struct page **pages;
        struct nfs_pgio_data *data;
        struct list_head *head = &desc->pg_list;

        data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
                                                          desc->pg_count));
        if (!data) {
                nfs_pagein_error(desc, hdr);
                return -ENOMEM;
        }

        pages = data->pages.pagevec;
        while (!list_empty(head)) {
                req = nfs_list_entry(head->next);
                nfs_list_remove_request(req);
                nfs_list_add_request(req, &hdr->pages);
                *pages++ = req->wb_page;
        }

        nfs_read_rpcsetup(data, desc->pg_count, 0);
        list_add(&data->list, &hdr->rpc_list);
        desc->pg_rpc_callops = &nfs_read_common_ops;
        return 0;
}

int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
                       struct nfs_pgio_header *hdr)
{
        if (desc->pg_bsize < PAGE_CACHE_SIZE)
                return nfs_pagein_multi(desc, hdr);
        return nfs_pagein_one(desc, hdr);
}
EXPORT_SYMBOL_GPL(nfs_generic_pagein);

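/*
 * pg_doio entry point for the MDS read path: allocate a header, turn the
 * coalesced page list into RPC requests, and send them.  The extra header
 * reference taken here keeps the completion from running until request
 * generation has finished.
 */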
static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
        struct nfs_rw_header *rhdr;
        struct nfs_pgio_header *hdr;
        int ret;

        rhdr = nfs_readhdr_alloc();
        if (!rhdr) {
                desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                return -ENOMEM;
        }
        hdr = &rhdr->header;
        nfs_pgheader_init(desc, hdr, nfs_readhdr_free);
        atomic_inc(&hdr->refcnt);
        ret = nfs_generic_pagein(desc, hdr);
        if (ret == 0)
                ret = nfs_do_multiple_reads(&hdr->rpc_list,
                                            desc->pg_rpc_callops);
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
        return ret;
}

static const struct nfs_pageio_ops nfs_pageio_read_ops = {
        .pg_test = nfs_generic_pg_test,
        .pg_doio = nfs_generic_pg_readpages,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_pgio_data *data)
{
        struct inode *inode = data->header->inode;
        int status;

        dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
                task->tk_status);

        status = NFS_PROTO(inode)->read_done(task, data);
        if (status != 0)
                return status;

        nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);

        if (task->tk_status == -ESTALE) {
                set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
                nfs_mark_for_revalidate(inode);
        }
        return 0;
}

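/*
 * The server returned fewer bytes than requested.  If it made no progress
 * at all, flag the header with -EIO; otherwise advance the argument
 * offsets past the bytes already received and restart the RPC to fetch
 * the remainder.
 */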
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_pgio_data *data)
{
        struct nfs_pgio_args *argp = &data->args;
        struct nfs_pgio_res *resp = &data->res;

        /* This is a short read! */
        nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
        /* Has the server at least made some progress? */
        if (resp->count == 0) {
                nfs_set_pgio_error(data->header, -EIO, argp->offset);
                return;
        }
        /* Yes, so retry the read at the end of the data */
        data->mds_offset += resp->count;
        argp->offset += resp->count;
        argp->pgbase += resp->count;
        argp->count -= resp->count;
        rpc_restart_call_prepare(task);
}

static void nfs_readpage_result_common(struct rpc_task *task, void *calldata)
{
        struct nfs_pgio_data *data = calldata;
        struct nfs_pgio_header *hdr = data->header;

        /* Note the only returns of nfs_readpage_result are 0 and -EAGAIN */
        if (nfs_readpage_result(task, data) != 0)
                return;
        if (task->tk_status < 0)
                nfs_set_pgio_error(hdr, task->tk_status, data->args.offset);
        else if (data->res.eof) {
                loff_t bound;

                bound = data->args.offset + data->res.count;
                spin_lock(&hdr->lock);
                if (bound < hdr->io_start + hdr->good_bytes) {
                        set_bit(NFS_IOHDR_EOF, &hdr->flags);
                        clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
                        hdr->good_bytes = bound - hdr->io_start;
                }
                spin_unlock(&hdr->lock);
        } else if (data->res.count != data->args.count)
                nfs_readpage_retry(task, data);
}

static void nfs_readpage_release_common(void *calldata)
{
        nfs_readdata_release(calldata);
}

void nfs_read_prepare(struct rpc_task *task, void *calldata)
{
        struct nfs_pgio_data *data = calldata;
        int err;

        err = NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
        if (err)
                rpc_exit(task, err);
}

static const struct rpc_call_ops nfs_read_common_ops = {
        .rpc_call_prepare = nfs_read_prepare,
        .rpc_call_done = nfs_readpage_result_common,
        .rpc_release = nfs_readpage_release_common,
};

/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  - The error flag is set for this page.  This happens only when a
 *    previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
        struct nfs_open_context *ctx;
        struct inode *inode = page_file_mapping(page)->host;
        int error;

        dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
                page, PAGE_CACHE_SIZE, page_file_index(page));
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
        nfs_add_stats(inode, NFSIOS_READPAGES, 1);

        /*
         * Try to flush any pending writes to the file..
         *
         * NOTE! Because we own the page lock, there cannot
         * be any new pending writes generated at this point
         * for this page (other pages can be written to).
         */
        error = nfs_wb_page(inode, page);
        if (error)
                goto out_unlock;
        if (PageUptodate(page))
                goto out_unlock;

        error = -ESTALE;
        if (NFS_STALE(inode))
                goto out_unlock;

        if (file == NULL) {
                error = -EBADF;
                ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (ctx == NULL)
                        goto out_unlock;
        } else
                ctx = get_nfs_open_context(nfs_file_open_context(file));

        if (!IS_SYNC(inode)) {
                error = nfs_readpage_from_fscache(ctx, inode, page);
                if (error == 0)
                        goto out;
        }

        error = nfs_readpage_async(ctx, inode, page);

out:
        put_nfs_open_context(ctx);
        return error;
out_unlock:
        unlock_page(page);
        return error;
}

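/*
 * State shared by readpage_async_filler() across all pages of a single
 * nfs_readpages() call: the pageio descriptor being filled and the open
 * context on whose behalf the reads are issued.
 */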
struct nfs_readdesc {
        struct nfs_pageio_descriptor *pgio;
        struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
        struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *new;
        unsigned int len;
        int error;

        len = nfs_page_length(page);
        if (len == 0)
                return nfs_return_empty_page(page);

        new = nfs_create_request(desc->ctx, inode, page, 0, len);
        if (IS_ERR(new))
                goto out_error;

        if (len < PAGE_CACHE_SIZE)
                zero_user_segment(page, len, PAGE_CACHE_SIZE);
        if (!nfs_pageio_add_request(desc->pgio, new)) {
                error = desc->pgio->pg_error;
                goto out_unlock;
        }
        return 0;
out_error:
        error = PTR_ERR(new);
out_unlock:
        unlock_page(page);
        return error;
}

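/*
 * address_space readpages() entry point.  Pages are first offered to
 * fscache; whatever cannot be satisfied from the cache is fed through
 * readpage_async_filler() and coalesced into as few READ RPCs as the
 * block size allows.
 */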
int nfs_readpages(struct file *filp, struct address_space *mapping,
                  struct list_head *pages, unsigned nr_pages)
{
        struct nfs_pageio_descriptor pgio;
        struct nfs_readdesc desc = {
                .pgio = &pgio,
        };
        struct inode *inode = mapping->host;
        unsigned long npages;
        int ret = -ESTALE;

        dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
                inode->i_sb->s_id,
                (unsigned long long)NFS_FILEID(inode),
                nr_pages);
        nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

        if (NFS_STALE(inode))
                goto out;

        if (filp == NULL) {
                desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
                if (desc.ctx == NULL)
                        return -EBADF;
        } else
                desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));

        /* attempt to read as many of the pages as possible from the cache
         * - this returns -ENOBUFS immediately if the cookie is negative
         */
        ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
                                         pages, &nr_pages);
        if (ret == 0)
                goto read_complete; /* all pages were read */

        nfs_pageio_init_read(&pgio, inode, false,
                             &nfs_async_read_completion_ops);

        ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);

        nfs_pageio_complete(&pgio);
        NFS_I(inode)->read_io += pgio.pg_bytes_written;
        npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        nfs_add_stats(inode, NFSIOS_READPAGES, npages);
read_complete:
        put_nfs_open_context(desc.ctx);
out:
        return ret;
}

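/*
 * Module init/exit helpers: create and destroy the slab cache that backs
 * nfs_rw_header allocations for the read path.
 */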
int __init nfs_init_readpagecache(void)
{
        nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
                                             sizeof(struct nfs_rw_header),
                                             0, SLAB_HWCACHE_ALIGN,
                                             NULL);
        if (nfs_rdata_cachep == NULL)
                return -ENOMEM;

        return 0;
}

void nfs_destroy_readpagecache(void)
{
        kmem_cache_destroy(nfs_rdata_cachep);
}