
/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out of or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");
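
/*
 * Note (added for clarity, not in the original source): a "hole" is a
 * range that must read as zeroes without touching the device.
 * PNFS_BLOCK_INVALID_DATA extents count as holes only while untagged;
 * the extent tree tags a range once it has been written through this
 * layout (see ext_tree_mark_written() in bl_write_cleanup() below).
 */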
static bool is_hole(struct pnfs_block_extent *be)
{
	switch (be->be_state) {
	case PNFS_BLOCK_NONE_DATA:
		return true;
	case PNFS_BLOCK_INVALID_DATA:
		return be->be_tag ? false : true;
	default:
		return false;
	}
}

/* The data we are handed might be spread across several bios. We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data);
	void *data;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}
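
/*
 * Illustrative sketch of the parallel_io lifecycle (added for clarity,
 * not in the original source): the pagelist routine holds the initial
 * reference, every submitted bio takes one in bl_submit_bio(), and every
 * bio completion drops one, so destroy_parallel() fires the callback
 * only after both the submitter and the last bio are done:
 *
 *	par = alloc_parallel(header);	// refcnt = 1
 *	bio = bl_submit_bio(rw, bio);	// get_parallel(): refcnt++
 *	...				// each end_io: put_parallel()
 *	put_parallel(par);		// submitter drops its reference
 */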

static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

static struct bio *
bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
		bio_end_io_t end_io, struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = disk_sector;
		bio->bi_bdev = bdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

static struct bio *
do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
		struct page *page, struct pnfs_block_dev_map *map,
		struct pnfs_block_extent *be, bio_end_io_t end_io,
		struct parallel_io *par, unsigned int offset, int *len)
{
	struct pnfs_block_dev *dev =
		container_of(be->be_device, struct pnfs_block_dev, node);
	u64 disk_addr, end;

	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, *len);

	/* translate to device offset */
	isect += be->be_v_offset;
	isect -= be->be_f_offset;

	/* translate to physical disk offset */
	disk_addr = (u64)isect << SECTOR_SHIFT;
	if (disk_addr < map->start || disk_addr >= map->start + map->len) {
		if (!dev->map(dev, disk_addr, map))
			return ERR_PTR(-EIO);
		bio = bl_submit_bio(rw, bio);
	}
	disk_addr += map->disk_offset;
	disk_addr -= map->start;

	/* limit length to what the device mapping allows */
	end = disk_addr + *len;
	if (end >= map->start + map->len)
		*len = map->start + map->len - disk_addr;

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, map->bdev,
				disk_addr >> SECTOR_SHIFT, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, *len, offset) < *len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}
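
/*
 * Worked example (illustrative numbers, not from the original source)
 * for the translation in do_add_page_to_bio(): with an extent whose
 * be_f_offset is 2048 sectors into the file and whose be_v_offset is
 * 4096 sectors into the volume, file sector isect = 2056 becomes
 * volume sector 2056 + 4096 - 2048 = 4104, i.e. byte address
 * 4104 << SECTOR_SHIFT = 2101248 on the volume, which the per-device
 * map() callback then narrows to a concrete bdev and disk offset.
 */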

static void bl_end_io_read(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;

	if (bio->bi_error) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}

	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *header)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = header->args.offset;
	size_t bytes_left = header->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;
	int i;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		header->page_array.npages, f_offset,
		(unsigned int)header->args.count);

	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_read;

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(READ, bio);

			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, false)) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_offset = f_offset & ~PAGE_CACHE_MASK;
		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_CACHE_SIZE;
		}
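		/*
		 * Illustrative note (not in the original source): only
		 * direct I/O may start or end mid-page. E.g. with 4K
		 * pages, f_offset = 5120 gives pg_offset = 1024 and at
		 * most pg_len = 3072 for this page; buffered reads
		 * always cover whole pages (pg_offset 0, pg_len 4096).
		 */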
		isect += (pg_offset >> SECTOR_SHIFT);
		extent_length -= (pg_offset >> SECTOR_SHIFT);

		if (is_hole(&be)) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);

			/* invalidate map */
			map.start = NFS4_MAX_UINT64;
		} else {
			bio = do_add_page_to_bio(bio,
						 header->page_array.npages - i,
						 READ,
						 isect, pages[i], &map, &be,
						 bl_end_io_read, par,
						 pg_offset, &pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
		f_offset += pg_len;
		bytes_left -= pg_len;
	}

	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		header->res.eof = 1;
		header->res.count = header->inode->i_size - header->args.offset;
	} else {
		header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
	}
out:
	bl_submit_bio(READ, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_end_io_write(struct bio *bio)
{
	struct parallel_io *par = bio->bi_private;
	struct nfs_pgio_header *header = par->data;

	if (bio->bi_error) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task = container_of(work, struct rpc_task, u.tk_work);
	struct nfs_pgio_header *hdr =
		container_of(task, struct nfs_pgio_header, task);

	dprintk("%s enter\n", __func__);

	if (likely(!hdr->pnfs_error)) {
		struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg);
		u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK;
		u64 end = (hdr->args.offset + hdr->args.count +
			PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK;

		ext_tree_mark_written(bl, start >> SECTOR_SHIFT,
					(end - start) >> SECTOR_SHIFT);
	}

	pnfs_ld_write_done(hdr);
}
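
/*
 * Worked example for the rounding in bl_write_cleanup() above
 * (illustrative numbers, not from the original source): with 4K pages,
 * a write of count = 3000 at offset = 1000 rounds start down to 0 and
 * end up to 4096, so sectors 0..7 (eight 512-byte sectors) are marked
 * written, matching the driver's whole-page write policy.
 */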

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	struct pnfs_block_layout *bl = BLK_LSEG2EXT(header->lseg);
	struct pnfs_block_dev_map map = { .start = NFS4_MAX_UINT64 };
	struct bio *bio = NULL;
	struct pnfs_block_extent be;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	unsigned int pg_len;
	struct blk_plug plug;
	int i;

	dprintk("%s enter, %zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		return PNFS_NOT_ATTEMPTED;
	par->pnfs_callback = bl_end_par_io_write;

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_CACHE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			if (!ext_tree_lookup(bl, isect, &be, true)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}
			extent_length = be.be_length - (isect - be.be_f_offset);
		}

		pg_len = PAGE_CACHE_SIZE;
		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], &map, &be,
					 bl_end_io_write, par,
					 0, &pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}

		offset += pg_len;
		count -= pg_len;
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}

	header->res.count = header->args.count;
out:
	bl_submit_bio(WRITE, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	int err;

	dprintk("%s enter\n", __func__);

	err = ext_tree_remove(bl, true, 0, LLONG_MAX);
	WARN_ON(err);

	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;

	bl->bl_ext_rw = RB_ROOT;
	bl->bl_ext_ro = RB_ROOT;
	spin_lock_init(&bl->bl_ext_lock);

	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* Tracks info needed to ensure extents in layout obey constraints of spec */
struct layout_verification {
	u32 mode;	/* R or RW */
	u64 start;	/* Expected start of next non-COW extent */
	u64 inval;	/* Start of INVAL coverage */
	u64 cowread;	/* End of COW read coverage */
};

/* Verify the extent meets the layout requirements of the pnfs-block draft,
 * section 2.3.1.
 */
static int verify_extent(struct pnfs_block_extent *be,
			 struct layout_verification *lv)
{
	if (lv->mode == IOMODE_READ) {
		if (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		    be->be_state == PNFS_BLOCK_INVALID_DATA)
			return -EIO;
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	}
	/* lv->mode == IOMODE_RW */
	if (be->be_state == PNFS_BLOCK_READWRITE_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		if (lv->cowread > lv->start)
			return -EIO;
		lv->start += be->be_length;
		lv->inval = lv->start;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (be->be_f_offset != lv->start)
			return -EIO;
		lv->start += be->be_length;
		return 0;
	} else if (be->be_state == PNFS_BLOCK_READ_DATA) {
		if (be->be_f_offset > lv->start)
			return -EIO;
		if (be->be_f_offset < lv->inval)
			return -EIO;
		if (be->be_f_offset < lv->cowread)
			return -EIO;
		/* It looks like you might want to min this with lv->start,
		 * but you really don't.
		 */
		lv->inval = lv->inval + be->be_length;
		lv->cowread = be->be_f_offset + be->be_length;
		return 0;
	} else
		return -EIO;
}
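
/*
 * Illustrative example (not from the original source) of a sequence that
 * passes verify_extent() for an RW layout over sectors [0, 200):
 *
 *	INVALID_DATA   f_offset 0,   length 100	(start -> 100)
 *	READ_DATA      f_offset 0,   length 100	(COW source, cowread -> 100)
 *	READWRITE_DATA f_offset 100, length 100	(start -> 200)
 *
 * The READ_DATA extent may overlap the preceding INVALID_DATA range
 * because it only supplies copy-on-write read data; any gap or unordered
 * start offset would fail with -EIO.
 */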

static int decode_sector_number(__be32 **rp, sector_t *sp)
{
	uint64_t s;

	*rp = xdr_decode_hyper(*rp, &s);
	if (s & 0x1ff) {
		printk(KERN_WARNING "NFS: %s: sector not aligned\n", __func__);
		return -1;
	}
	*sp = s >> SECTOR_SHIFT;
	return 0;
}
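
/*
 * Illustrative note (not in the original source): the on-the-wire values
 * are byte counts, and 0x1ff masks the low 9 bits, so only multiples of
 * 512 are accepted. E.g. 4096 decodes to sector 8, while 4100 is
 * rejected as unaligned.
 */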

static int
bl_alloc_extent(struct xdr_stream *xdr, struct pnfs_layout_hdr *lo,
		struct layout_verification *lv, struct list_head *extents,
		gfp_t gfp_mask)
{
	struct pnfs_block_extent *be;
	struct nfs4_deviceid id;
	int error;
	__be32 *p;

	p = xdr_inline_decode(xdr, 28 + NFS4_DEVICEID4_SIZE);
	if (!p)
		return -EIO;

	be = kzalloc(sizeof(*be), GFP_NOFS);
	if (!be)
		return -ENOMEM;

	memcpy(&id, p, NFS4_DEVICEID4_SIZE);
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);

	error = -EIO;
	be->be_device = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &id,
						lo->plh_lc_cred, gfp_mask);
	if (!be->be_device)
		goto out_free_be;

	/*
	 * The next three values are read in as bytes, but stored in the
	 * extent structure in 512-byte granularity.
	 */
	if (decode_sector_number(&p, &be->be_f_offset) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_length) < 0)
		goto out_put_deviceid;
	if (decode_sector_number(&p, &be->be_v_offset) < 0)
		goto out_put_deviceid;
	be->be_state = be32_to_cpup(p++);

	error = verify_extent(be, lv);
	if (error) {
		dprintk("%s: extent verification failed\n", __func__);
		goto out_put_deviceid;
	}

	list_add_tail(&be->be_list, extents);
	return 0;

out_put_deviceid:
	nfs4_put_deviceid_node(be->be_device);
out_free_be:
	kfree(be);
	return error;
}
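
/*
 * Wire format consumed by bl_alloc_extent() above (summary, as implied
 * by the decode sequence): a 16-byte deviceid (NFS4_DEVICEID4_SIZE),
 * three 64-bit byte quantities (file offset, length, volume offset) and
 * a 32-bit extent state, i.e. 3 * 8 + 4 = 28 bytes after the deviceid,
 * which is exactly the 28 + NFS4_DEVICEID4_SIZE decoded in one go.
 */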

static struct pnfs_layout_segment *
bl_alloc_lseg(struct pnfs_layout_hdr *lo, struct nfs4_layoutget_res *lgr,
	      gfp_t gfp_mask)
{
	struct layout_verification lv = {
		.mode = lgr->range.iomode,
		.start = lgr->range.offset >> SECTOR_SHIFT,
		.inval = lgr->range.offset >> SECTOR_SHIFT,
		.cowread = lgr->range.offset >> SECTOR_SHIFT,
	};
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	struct pnfs_layout_segment *lseg;
	struct xdr_buf buf;
	struct xdr_stream xdr;
	struct page *scratch;
	int status, i;
	uint32_t count;
	__be32 *p;
	LIST_HEAD(extents);

	dprintk("---> %s\n", __func__);

	lseg = kzalloc(sizeof(*lseg), gfp_mask);
	if (!lseg)
		return ERR_PTR(-ENOMEM);

	status = -ENOMEM;
	scratch = alloc_page(gfp_mask);
	if (!scratch)
		goto out;

	xdr_init_decode_pages(&xdr, &buf,
			lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&xdr, page_address(scratch), PAGE_SIZE);

	status = -EIO;
	p = xdr_inline_decode(&xdr, 4);
	if (unlikely(!p))
		goto out_free_scratch;

	count = be32_to_cpup(p++);
	dprintk("%s: number of extents %d\n", __func__, count);

	/*
	 * Decode individual extents, putting them in temporary staging area
	 * until whole layout is decoded to make error recovery easier.
	 */
	for (i = 0; i < count; i++) {
		status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask);
		if (status)
			goto process_extents;
	}

	if (lgr->range.offset + lgr->range.length !=
			lv.start << SECTOR_SHIFT) {
		dprintk("%s Final length mismatch\n", __func__);
		status = -EIO;
		goto process_extents;
	}

	if (lv.start < lv.cowread) {
		dprintk("%s Final uncovered COW extent\n", __func__);
		status = -EIO;
	}

process_extents:
	while (!list_empty(&extents)) {
		struct pnfs_block_extent *be =
			list_first_entry(&extents, struct pnfs_block_extent,
					 be_list);
		list_del(&be->be_list);

		if (!status)
			status = ext_tree_insert(bl, be);

		if (status) {
			nfs4_put_deviceid_node(be->be_device);
			kfree(be);
		}
	}

out_free_scratch:
	__free_page(scratch);
out:
	dprintk("%s returns %d\n", __func__, status);
	if (status) {
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_return_range(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);
	sector_t offset = range->offset >> SECTOR_SHIFT, end;

	if (range->offset % 8) {
		dprintk("%s: offset %lld not block size aligned\n",
			__func__, range->offset);
		return;
	}

	if (range->length != NFS4_MAX_UINT64) {
		if (range->length % 8) {
			dprintk("%s: length %lld not block size aligned\n",
				__func__, range->length);
			return;
		}

		end = offset + (range->length >> SECTOR_SHIFT);
	} else {
		end = round_down(NFS4_MAX_UINT64, PAGE_SIZE);
	}

	ext_tree_remove(bl, range->iomode & IOMODE_RW, offset, end);
}

static int
bl_prepare_layoutcommit(struct nfs4_layoutcommit_args *arg)
{
	return ext_tree_prepare_commit(arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	ext_tree_mark_committed(&lcdata->args, lcdata->res.status);
}

static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	return 0;
}

static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode)) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != inode->i_mapping->nrpages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}
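
/*
 * Illustrative example for pnfs_num_cont_bytes() above (not from the
 * original source): for a fully cached 32K file (eight 4K pages) and
 * idx = 0, end equals nrpages, the hole search is skipped, and the
 * function returns (8 - 0) << PAGE_CACHE_SHIFT = 32K, so the whole
 * file can be requested in a single layout segment.
 */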

static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.flags				= PNFS_LAYOUTRET_ON_SETATTR |
					  PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.return_range			= bl_return_range,
	.prepare_layoutcommit		= bl_prepare_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.alloc_deviceid_node		= bl_alloc_deviceid_node,
	.free_deviceid_node		= bl_free_deviceid_node,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
	.sync				= pnfs_generic_sync,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;
	ret = bl_init_pipefs();
	if (ret)
		goto out_unregister;
	return 0;

out_unregister:
	pnfs_unregister_layoutdriver(&blocklayout_type);
out:
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
		__func__);

	bl_cleanup_pipefs();
	pnfs_unregister_layoutdriver(&blocklayout_type);
}
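
/*
 * Note (added for clarity, not in the original source): the "3" in the
 * alias below is the LAYOUT4_BLOCK_VOLUME layout type number assigned
 * by RFC 5661, so this module is auto-loaded when a server advertises a
 * block-volume layout.
 */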
MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);