/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
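
/*
 * An extent is treated as a hole (read back as zeroes, never touching the
 * device) if it is PNFS_BLOCK_NONE_DATA, or if it is PNFS_BLOCK_INVALID_DATA
 * and has no tag set, i.e. has not been written yet.
 */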
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}
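
/*
 * Submit a bio, taking a reference on the parallel_io it belongs to so the
 * completion callback fires only after the last in-flight bio finishes.
 * Always returns NULL so the caller can simply reset its bio pointer.
 */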
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}
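
/*
 * Allocate a bio for up to npg pages.  If we are in memory reclaim
 * (PF_MEMALLOC) and the allocation fails, retry with progressively smaller
 * bios rather than failing outright.
 */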
static struct bio *
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
		bio_end_io_t end_io, struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}
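
/*
 * Add one page to a bio, translating the file-relative sector isect to a
 * physical disk address via the extent and the cached device mapping.  A
 * new bio is started whenever the I/O crosses a device-mapping boundary,
 * and *len is trimmed to what the current mapping allows.
 */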
static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be, bio_end_io_t end_io,
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (disk_addr < map->start || disk_addr >= map->start + map->len) {
		if (!dev->map(dev, disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(rw, bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}
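
/*
 * Read bio completion: record the first failure in the header and mark the
 * layout segment failed so the read is redone through the MDS.
 */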
static void bl_end_io_read(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;

	if (bio->bi_error) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}

	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}
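
/*
 * Read path: walk the pages of the request, looking up the covering extent
 * for each page, zero-filling holes in place and queueing everything else
 * as bios.  The device map is invalidated after a hole so the next data
 * page is re-mapped.  The parallel_io reference keeps the completion
 * callback from firing until the last bio is done.
 */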
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset = header->args.pgbase, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(READ, bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_CACHE_SIZE;
		}

		if (is_hole(&be)) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
		pg_offset = 0;
	}

	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(READ, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}
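
/*
 * Write bio completion: like bl_end_io_read, record the first error and
 * fail the layout segment so the write is redone through the MDS.
 */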
static void bl_end_io_write(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;
	struct nfs_pgio_header *header = par->data;

	if (bio->bi_error) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
		container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT);
	}

	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}
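
/*
 * Write path counterpart of bl_read_pagelist: whole pages are always
 * written out, so the starting offset is first rounded down to a page
 * boundary before the extents are walked.
 */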
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_CACHE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_CACHE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(WRITE, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}

	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}
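
/*
 * Decode a 64-bit byte value from the XDR stream and convert it to a
 * 512-byte sector count, rejecting values that are not sector-aligned.
 */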
static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}
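
/*
 * Decode a single extent from the layout XDR: device ID, then file offset,
 * length and volume offset (stored as sectors), then the extent state.
 * The extent is verified against the spec's ordering rules before it is
 * queued on the caller's staging list.
 */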
static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	error = -EIO;
	be->be_device = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (!be->be_device)
		goto out_free_be;

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}
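
/*
 * Decode the layout body returned by LAYOUTGET.  Extents are staged on a
 * local list until the whole layout has been decoded and cross-checked,
 * and only then inserted into the layout's extent tree.
 */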
static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
		gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in temporary staging area
	 * until whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	if (status) {
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}
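
/*
 * Remove the extents covered by a returned range from the extent trees.
 * Ranges whose offset or length is not properly aligned are silently
 * ignored, as the extent trees operate in whole sectors.
 */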
static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}
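
/*
 * Sanity-check the server-provided block size: it must be set, and this
 * driver cannot handle block sizes larger than the page size.
 */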
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}

static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode)) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}
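
/*
 * Direct I/O requests that are not sector-aligned (reads) or page-aligned
 * (writes) cannot be expressed as block extents, so they are redirected to
 * the MDS via nfs_pageio_reset_{read,write}_mds.
 */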
static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != inode->i_mapping->nrpages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id = LAYOUT_BLOCK_VOLUME,
	.name = "LAYOUT_BLOCK_VOLUME",
	.owner = THIS_MODULE,
	.flags = PNFS_LAYOUTRET_ON_SETATTR |
		 PNFS_READ_WHOLE_PAGE,
	.read_pagelist = bl_read_pagelist,
	.write_pagelist = bl_write_pagelist,
	.alloc_layout_hdr = bl_alloc_layout_hdr,
	.free_layout_hdr = bl_free_layout_hdr,
	.alloc_lseg = bl_alloc_lseg,
	.free_lseg = bl_free_lseg,
	.return_range = bl_return_range,
	.prepare_layoutcommit = bl_prepare_layoutcommit,
	.cleanup_layoutcommit = bl_cleanup_layoutcommit,
	.set_layoutdriver = bl_set_layoutdriver,
	.alloc_deviceid_node = bl_alloc_deviceid_node,
	.free_deviceid_node = bl_free_deviceid_node,
	.pg_read_ops = &bl_pg_read_ops,
	.pg_write_ops = &bl_pg_write_ops,
	.sync = pnfs_generic_sync,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;
	ret = bl_init_pipefs();
	if (ret)
		goto out_unregister;
	return 0;

out_unregister:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out:
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
		__func__);

	bl_cleanup_pipefs();
	pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);