/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose. the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/buffer_head.h>	/* various write calls */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

static void print_page(struct page *page)
{
	dprintk("PRINTPAGE page %p\n", page);
	dprintk("	PagePrivate %d\n", PagePrivate(page));
	dprintk("	PageUptodate %d\n", PageUptodate(page));
	dprintk("	PageError %d\n", PageError(page));
	dprintk("	PageDirty %d\n", PageDirty(page));
	dprintk("	PageReferenced %d\n", PageReferenced(page));
	dprintk("	PageLocked %d\n", PageLocked(page));
	dprintk("	PageWriteback %d\n", PageWriteback(page));
	dprintk("	PageMappedToDisk %d\n", PageMappedToDisk(page));
	dprintk("\n");
}

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data, int num_se);
	void *data;
	int bse_count;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
		rv->bse_count = 0;
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data, p->bse_count);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}
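
/* Submit the current bio, if any.  A reference on the parallel_io is
 * taken for each bio in flight, so destroy_parallel() cannot fire until
 * every bio's end_io has done its put_parallel().  Always returns NULL,
 * which lets callers submit and reset their bio pointer in one step:
 *
 *	bio = bl_submit_bio(rw, bio);
 */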
static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}
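
/* Allocate a bio holding up to npg pages.  If the allocation fails while
 * we are in memory-reclaim context (PF_MEMALLOC), keep halving the
 * request instead of giving up, since even a one-page bio lets the I/O
 * make forward progress.
 */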
static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = isect - be->be_f_offset +
			be->be_v_offset;
		bio->bi_bdev = be->be_mdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}
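
/* Add [offset, offset + len) of page to the current bio.  When the page
 * does not fit (bio_add_page() accepts fewer than len bytes), the full
 * bio is submitted and the add is retried on a freshly allocated one, so
 * one request may fan out over several bios tracked by the same par.
 */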
static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par,
				      unsigned int offset, int len)
{
	isect = isect + (offset >> SECTOR_SHIFT);
	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, len);

retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, len, offset) < len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par)
{
	return do_add_page_to_bio(bio, npg, rw, isect, page, be,
				  end_io, par, 0, PAGE_CACHE_SIZE);
}

/* This is basically copied from mpage_end_io_read */
static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	if (!err)
		bio_for_each_segment_all(bvec, bio, i)
			SetPageUptodate(bvec->bv_page);

	if (err) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}
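
/* Read completions run in bio end_io (interrupt) context, so the final
 * pnfs_ld_read_done() call is bounced to a workqueue; the tk_work field
 * of the rpc_task embedded in the header is reused as the work item.
 */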
static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data, int unused)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}
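
/* Read path entry point.  Walks the request's pages, locating the extent
 * that covers each one: holes with no copy-on-write source are zeroed in
 * place without touching the device, everything else is queued on a bio
 * aimed at the proper device sector.  All bios share one parallel_io
 * whose final put triggers bl_end_par_io_read().  For direct I/O the
 * first and last pages may be partial (pg_offset/pg_len); buffered reads
 * always cover whole pages.
 */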
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_header *header = hdr;
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = hdr->args.offset;
	size_t bytes_left = hdr->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = hdr->args.pages;
	int pg_index = hdr->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		hdr->page_array.npages, f_offset,
		(unsigned int)hdr->args.count);

	par = alloc_parallel(hdr);
	if (!par)
		goto use_mds;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < hdr->page_array.npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
						isect, &cow_read);
			if (!be) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}

		if (is_dio) {
			pg_offset = f_offset & ~PAGE_CACHE_MASK;
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;

			f_offset += pg_len;
			bytes_left -= pg_len;
			isect += (pg_offset >> SECTOR_SHIFT);
		} else {
			pg_offset = 0;
			pg_len = PAGE_CACHE_SIZE;
		}

		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);
			print_page(pages[i]);
			SetPageUptodate(pages[i]);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = do_add_page_to_bio(bio,
						 hdr->page_array.npages - i,
						 READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par,
						 pg_offset, pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= PAGE_CACHE_SECTORS;
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		hdr->res.eof = 1;
		hdr->res.count = header->inode->i_size - hdr->args.offset;
	} else {
		hdr->res.count = (isect << SECTOR_SHIFT) - hdr->args.offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;

use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}

static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;
	struct pnfs_block_short_extent *se;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;

		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			se = bl_pop_one_short_extent(be->be_inval);
			BUG_ON(!se);
			bl_mark_for_commit(be, isect, len, se);
		}
		isect += len;
		bl_put_extent(be);
	}
}
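
/* Completion for bios built from zeroing pages.  Unlike bl_end_io_write,
 * these pages were instantiated by bl_find_get_zeroing_page() rather
 * than handed in by the caller, so writeback must be ended and the page
 * reference dropped here.
 */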
static void bl_end_io_write_zero(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		/* This is the zeroing page we added */
		end_page_writeback(bvec->bv_page);
		page_cache_release(bvec->bv_page);
	}

	if (unlikely(err)) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_pgio_header *header = par->data;

	if (!uptodate) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Function scheduled for call during bl_end_par_io_write,
 * it marks sectors as written and extends the commitlist.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;

	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	if (likely(!hdr->pnfs_error)) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(hdr->lseg),
				     hdr->args.offset, hdr->args.count);
	}
	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
	struct nfs_pgio_header *hdr = data;

	if (unlikely(hdr->pnfs_error)) {
		bl_free_short_extents(&BLK_LSEG2EXT(hdr->lseg)->bl_inval,
					num_se);
	}

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

/* FIXME STUB - mark intersection of layout and page as bad, so is not
 * used again.
 */
static void mark_bad_read(void)
{
	return;
}

/*
 * map_block:  map a requested I/O block (isect) into an offset in the LVM
 * block_device
 */
static void
map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
{
	dprintk("%s enter be=%p\n", __func__, be);

	set_buffer_mapped(bh);
	bh->b_bdev = be->be_mdev;
	bh->b_blocknr = (isect - be->be_f_offset + be->be_v_offset) >>
		(be->be_mdev->bd_inode->i_blkbits - SECTOR_SHIFT);

	dprintk("%s isect %llu, bh->b_blocknr %ld, using bsize %Zd\n",
		__func__, (unsigned long long)isect, (long)bh->b_blocknr,
		bh->b_size);
	return;
}

static void
bl_read_single_end_io(struct bio *bio, int error)
{
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;

	/* Only one page in bvec */
	unlock_page(page);
}
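
/* Synchronously read the sector containing [offset, offset + len) into a
 * throwaway "shadow" page, then copy just the requested bytes into the
 * real page.  This is the read half of the sector-sized read-modify-write
 * cycles below; the shadow page's lock doubles as the completion signal,
 * released by bl_read_single_end_io().
 */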
static int
bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
		    unsigned int offset, unsigned int len)
{
	struct bio *bio;
	struct page *shadow_page;
	sector_t isect;
	char *kaddr, *kshadow_addr;
	int ret = 0;

	dprintk("%s: offset %u len %u\n", __func__, offset, len);

	shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (shadow_page == NULL)
		return -ENOMEM;

	bio = bio_alloc(GFP_NOIO, 1);
	if (bio == NULL) {
		/* don't leak the shadow page on bio allocation failure */
		__free_page(shadow_page);
		return -ENOMEM;
	}

	isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
		(offset / SECTOR_SIZE);

	bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
	bio->bi_bdev = be->be_mdev;
	bio->bi_end_io = bl_read_single_end_io;

	lock_page(shadow_page);
	if (bio_add_page(bio, shadow_page,
			 SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
		unlock_page(shadow_page);
		bio_put(bio);
		__free_page(shadow_page);
		return -EIO;
	}

	submit_bio(READ, bio);
	wait_on_page_locked(shadow_page);
	if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
		ret = -EIO;
	} else {
		kaddr = kmap_atomic(page);
		kshadow_addr = kmap_atomic(shadow_page);
		memcpy(kaddr + offset, kshadow_addr + offset, len);
		kunmap_atomic(kshadow_addr);
		kunmap_atomic(kaddr);
	}
	__free_page(shadow_page);
	bio_put(bio);
	return ret;
}

static int
bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
			  unsigned int dirty_offset, unsigned int dirty_len,
			  bool full_page)
{
	int ret = 0;
	unsigned int start, end;

	if (full_page) {
		start = 0;
		end = PAGE_CACHE_SIZE;
	} else {
		start = round_down(dirty_offset, SECTOR_SIZE);
		end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
	}

	dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
	if (!be) {
		zero_user_segments(page, start, dirty_offset,
				   dirty_offset + dirty_len, end);
		if (start == 0 && end == PAGE_CACHE_SIZE &&
		    trylock_page(page)) {
			SetPageUptodate(page);
			unlock_page(page);
		}
		return ret;
	}

	if (start != dirty_offset)
		ret = bl_do_readpage_sync(page, be, start, dirty_offset - start);

	if (!ret && (dirty_offset + dirty_len < end))
		ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
					  end - dirty_offset - dirty_len);

	return ret;
}

/* Given an unmapped page, zero it or read in page for COW, page is locked
 * by caller.
 */
static int
init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
{
	struct buffer_head *bh = NULL;
	int ret = 0;
	sector_t isect;

	dprintk("%s enter, %p\n", __func__, page);
	BUG_ON(PageUptodate(page));
	if (!cow_read) {
		zero_user_segment(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		goto cleanup;
	}

	bh = alloc_page_buffers(page, PAGE_CACHE_SIZE, 0);
	if (!bh) {
		ret = -ENOMEM;
		goto cleanup;
	}

	isect = (sector_t) page->index << PAGE_CACHE_SECTOR_SHIFT;
	map_block(bh, isect, cow_read);
	if (!bh_uptodate_or_lock(bh))
		ret = bh_submit_read(bh);
	if (ret)
		goto cleanup;
	SetPageUptodate(page);

cleanup:
	if (bh)
		free_buffer_head(bh);
	if (ret) {
		/* Need to mark layout with bad read...should now
		 * just use nfs4 for reads and writes.
		 */
		mark_bad_read();
	}
	return ret;
}

/* Find or create a zeroing page marked being writeback.
 * Return ERR_PTR on error, NULL to indicate skip this page and page itself
 * to indicate write out.
 */
static struct page *
bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
			 struct pnfs_block_extent *cow_read)
{
	struct page *page;
	int locked = 0;

	page = find_get_page(inode->i_mapping, index);
	if (page)
		goto check_page;

	page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
	if (unlikely(!page)) {
		dprintk("%s oom\n", __func__);
		return ERR_PTR(-ENOMEM);
	}
	locked = 1;

check_page:
	/* PageDirty: Other will write this out
	 * PageWriteback: Other is writing this out
	 * PageUptodate: It was read before
	 */
	if (PageDirty(page) || PageWriteback(page)) {
		print_page(page);
		if (locked)
			unlock_page(page);
		page_cache_release(page);
		return NULL;
	}

	if (!locked) {
		lock_page(page);
		locked = 1;
		goto check_page;
	}
	if (!PageUptodate(page)) {
		/* New page, readin or zero it */
		init_page_for_write(page, cow_read);
	}
	set_page_writeback(page);
	unlock_page(page);

	return page;
}
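
/* Write path entry point, in three phases:
 *   1. If the write starts mid-block within an INVALID extent, zero the
 *      block's leading pages (fill_invalid_ext).
 *   2. Write the caller's pages, falling back to sector-level
 *      read-modify-write for unaligned edges.
 *   3. If the write ends mid-block within an INVALID extent, jump back
 *      to fill_invalid_ext with last set to zero the trailing pages.
 * Non-block-aligned direct writes are redirected to the MDS up front.
 */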
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	int i, ret, npg_zero, pg_index, last = 0;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, last_isect = 0, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	unsigned int pg_offset, pg_len, saved_len;
	struct page **pages = header->args.pages;
	struct page *page;
	pgoff_t index;
	u64 temp;
	int npg_per_block =
	    NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

	if (header->dreq != NULL &&
	    (!IS_ALIGNED(offset, NFS_SERVER(header->inode)->pnfs_blksize) ||
	     !IS_ALIGNED(count, NFS_SERVER(header->inode)->pnfs_blksize))) {
		dprintk("pnfsblock nonblock aligned DIO writes. Resend MDS\n");
		goto out_mds;
	}

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		goto out_mds;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
	be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
	if (!be || !is_writable(be, isect)) {
		dprintk("%s no matching extents!\n", __func__);
		goto out_mds;
	}

	/* First page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		if (likely(!bl_push_one_short_extent(be->be_inval)))
			par->bse_count++;
		else
			goto out_mds;
		temp = offset >> PAGE_CACHE_SHIFT;
		npg_zero = do_div(temp, npg_per_block);
		isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
				     (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
		extent_length = be->be_length - (isect - be->be_f_offset);

fill_invalid_ext:
		dprintk("%s need to zero %d pages\n", __func__, npg_zero);
		for (; npg_zero > 0; npg_zero--) {
			if (bl_is_sector_init(be->be_inval, isect)) {
				dprintk("isect %llu already init\n",
					(unsigned long long)isect);
				goto next_page;
			}
			/* page ref released in bl_end_io_write_zero */
			index = isect >> PAGE_CACHE_SECTOR_SHIFT;
			dprintk("%s zero %dth page: index %lu isect %llu\n",
				__func__, npg_zero, index,
				(unsigned long long)isect);
			page = bl_find_get_zeroing_page(header->inode, index,
							cow_read);
			if (unlikely(IS_ERR(page))) {
				header->pnfs_error = PTR_ERR(page);
				goto out;
			} else if (page == NULL)
				goto next_page;

			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				end_page_writeback(page);
				page_cache_release(page);
				header->pnfs_error = ret;
				goto out;
			}
			if (likely(!bl_push_one_short_extent(be->be_inval)))
				par->bse_count++;
			else {
				end_page_writeback(page);
				page_cache_release(page);
				header->pnfs_error = -ENOMEM;
				goto out;
			}
			/* FIXME: This should be done in bi_end_io */
			mark_extents_written(BLK_LSEG2EXT(header->lseg),
					     page->index << PAGE_CACHE_SHIFT,
					     PAGE_CACHE_SIZE);

			bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
						 isect, page, be,
						 bl_end_io_write_zero, par);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
next_page:
			isect += PAGE_CACHE_SECTORS;
			extent_length -= PAGE_CACHE_SECTORS;
		}
		if (last)
			goto write_done;
	}
	bio = bl_submit_bio(WRITE, bio);

	/* Middle pages */
	pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	for (i = pg_index; i < header->page_array.npages; i++) {
		if (!extent_length) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
						isect, &cow_read);
			if (!be || !is_writable(be, isect)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}
			if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
				if (likely(!bl_push_one_short_extent(
								be->be_inval)))
					par->bse_count++;
				else {
					header->pnfs_error = -ENOMEM;
					goto out;
				}
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
		}

		dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
		pg_offset = offset & ~PAGE_CACHE_MASK;
		if (pg_offset + count > PAGE_CACHE_SIZE)
			pg_len = PAGE_CACHE_SIZE - pg_offset;
		else
			pg_len = count;

		saved_len = pg_len;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
		    !bl_is_sector_init(be->be_inval, isect)) {
			ret = bl_read_partial_page_sync(pages[i], cow_read,
							pg_offset, pg_len, true);
			if (ret) {
				dprintk("%s bl_read_partial_page_sync fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}

			ret = bl_mark_sectors_init(be->be_inval, isect,
						   PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}

			/* Expand to full page write */
			pg_offset = 0;
			pg_len = PAGE_CACHE_SIZE;
		} else if ((pg_offset & (SECTOR_SIZE - 1)) ||
			   (pg_len & (SECTOR_SIZE - 1))) {
			/* ahh, nasty case. We have to do sync full sector
			 * read-modify-write cycles.
			 */
			unsigned int saved_offset = pg_offset;

			ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
							pg_len, false);
			pg_offset = round_down(pg_offset, SECTOR_SIZE);
			pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
				 - pg_offset;
		}

		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE,
					 isect, pages[i], be,
					 bl_end_io_write, par,
					 pg_offset, pg_len);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		offset += saved_len;
		count -= saved_len;
		isect += PAGE_CACHE_SECTORS;
		last_isect = isect;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	/* Last page inside INVALID extent */
	if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
		bio = bl_submit_bio(WRITE, bio);
		temp = last_isect >> PAGE_CACHE_SECTOR_SHIFT;
		npg_zero = npg_per_block - do_div(temp, npg_per_block);
		if (npg_zero < npg_per_block) {
			last = 1;
			goto fill_invalid_ext;
		}
	}

write_done:
	header->res.count = header->args.count;
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(WRITE, bio);
	put_parallel(par);
	return PNFS_ATTEMPTED;
out_mds:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	kfree(par);
	return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;
	struct pnfs_block_short_extent *se, *stemp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}

	list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
		list_del(&se->bse_node);
		kfree(se);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_count = 0;
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev, *tmp;

		/* No need to take bm_lock as we are last user freeing bm_devlist */
		list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout_get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kcalloc(max_pages, sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;
	dev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev, NULL);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
out_free:
	for (i = 0; i < max_pages; i++) {
		/* on a partial alloc_page() failure, trailing slots are NULL */
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	kfree(dev);
	return rv;
}
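
/* Mount-time setup: iterate GETDEVICELIST until eof, fetching and
 * decoding each device via nfs4_blk_get_deviceinfo(), and hang the
 * resulting pnfs_block_dev list off server->pnfs_ld_data for later
 * lookups.  A server that did not advertise a block size is rejected
 * with -EINVAL, which makes pNFS fall back to ordinary MDS I/O.
 */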
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}

	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

out_return:
	kfree(dlist);
	return status;

out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

static bool
is_aligned_req(struct nfs_page *req, unsigned int alignment)
{
	return IS_ALIGNED(req->wb_offset, alignment) &&
	       IS_ALIGNED(req->wb_bytes, alignment);
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, SECTOR_SIZE))
		nfs_pageio_reset_read_mds(pgio);
	else
		pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, SECTOR_SIZE))
		return 0;

	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != NFS_I(inode)->npages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}
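
/* Direct writes must be page aligned or they are bounced to the MDS.
 * Otherwise, estimate the total write size handed to
 * pnfs_generic_pg_init_write(): the span of contiguous dirty pages for
 * buffered writes, or the remaining direct-request byte count for DIO.
 */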
static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, PAGE_CACHE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
	} else {
		u64 wb_size;

		if (pgio->pg_dreq == NULL)
			wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
						      req->wb_index);
		else
			wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pnfs_generic_pg_init_write(pgio, req, wb_size);
	}
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (pgio->pg_dreq != NULL &&
	    !is_aligned_req(req, PAGE_CACHE_SIZE))
		return 0;

	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.encode_layoutcommit		= bl_encode_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.clear_layoutdriver		= bl_clear_layoutdriver,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
};

static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};
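
/* The "blocklayout" rpc_pipefs pipe below is the driver's upcall channel
 * for resolving volume signatures to local block devices; replies arrive
 * through bl_pipe_downcall(), serviced from userspace (historically by
 * the blkmapd daemon).  The notifier and pernet hooks that follow keep
 * the pipe dentry in step with rpc_pipefs mounts in each namespace.
 */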
static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
						  struct rpc_pipe *pipe)
{
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
	if (dir == NULL)
		return ERR_PTR(-ENOENT);
	dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
	dput(dir);
	return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
					  struct rpc_pipe *pipe)
{
	if (pipe->dentry)
		rpc_unlink(pipe->dentry);
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct net *net = sb->s_fs_info;
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return 0;

	if (nn->bl_device_pipe == NULL) {
		module_put(THIS_MODULE);
		return 0;
	}

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
		if (IS_ERR(dentry)) {
			ret = PTR_ERR(dentry);
			break;
		}
		nn->bl_device_pipe->dentry = dentry;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (nn->bl_device_pipe->dentry)
			nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}
	module_put(THIS_MODULE);
	return ret;
}

static struct notifier_block nfs4blocklayout_block = {
	.notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
						   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;
	struct dentry *dentry;

	pipefs_sb = rpc_get_sb_net(net);
	if (!pipefs_sb)
		return NULL;
	dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
	rpc_put_sb_net(net);
	return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
					   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
		rpc_put_sb_net(net);
	}
}

static int nfs4blocklayout_net_init(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;

	init_waitqueue_head(&nn->bl_wq);
	nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
	if (IS_ERR(nn->bl_device_pipe))
		return PTR_ERR(nn->bl_device_pipe);
	dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
	if (IS_ERR(dentry)) {
		rpc_destroy_pipe_data(nn->bl_device_pipe);
		return PTR_ERR(dentry);
	}
	nn->bl_device_pipe->dentry = dentry;
	return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);

	nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
	rpc_destroy_pipe_data(nn->bl_device_pipe);
	nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
	.init = nfs4blocklayout_net_init,
	.exit = nfs4blocklayout_net_exit,
};
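
/* Module init registers the layout driver, the rpc_pipefs notifier, and
 * the pernet subsystem in that order; the error labels unwind in reverse
 * so a partial registration never lingers.
 */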
static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
	if (ret)
		goto out_remove;
	ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
	if (ret)
		goto out_notifier;
out:
	return ret;

out_notifier:
	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
		__func__);

	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
	unregister_pernet_subsys(&nfs4blocklayout_net_ops);
	pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);