aops.c

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"


static void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
				   unsigned int from, unsigned int to)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int start, end;
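	/*
	 * Page buffers form a circular list, so "bh != head" alone would
	 * stop before the first buffer is visited; "start" is zero only
	 * on the first pass, which lets the loop cover every buffer once.
	 */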
	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from || start >= to)
			continue;
		if (gfs2_is_jdata(ip))
			set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}
/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -EIO;
	return 0;
}

static int gfs2_get_block_direct(struct inode *inode, sector_t lblock,
				 struct buffer_head *bh_result, int create)
{
	return gfs2_block_map(inode, lblock, bh_result, 0);
}

/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Returns: 1 if writepage should go ahead; otherwise zero, with the page
 * unlocked (and redirtied where appropriate).
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
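	/*
	 * This task already has a transaction open (journal_info is set);
	 * writing the page from here could deadlock against the log, so
	 * redirty it and let a later writeback pass handle it.
	 */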
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;

redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepage - Write page for writeback mappings
 * @page: The page
 * @wbc: The writeback control
 *
 */

static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = gfs2_writepage_common(page, wbc);
	if (ret <= 0)
		return ret;

	return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}
/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}
/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
	}
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);

	/*
	 * Even if we didn't write any pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	if (ret == 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);

	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @end: End position
 * @done_index: Page index
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages, pgoff_t end,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;
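	/*
	 * Journaled data blocks are written through the log, so the
	 * transaction must reserve enough space to cover every block in
	 * the pagevec (one block reservation per buffer, worst case).
	 */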
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		/*
		 * At this point, the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or
		 * even swizzled back from swapper_space to tmpfs file
		 * mapping. However, page->index will not change
		 * because we have a reference on the page.
		 */
		if (page->index > end) {
			/*
			 * can't be range_cyclic (1st pass) because
			 * end == -1 in that case.
			 */
			ret = 1;
			break;
		}

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
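	/*
	 * For data-integrity syncs, re-tag the dirty pages as TOWRITE up
	 * front so that the walk has a fixed set of pages to write and
	 * cannot livelock on pages that are redirtied while it runs.
	 */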
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, end, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
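	/*
	 * On a data-integrity sync, some jdata pages may still be pinned
	 * in the journal; flush the log and make a second pass so those
	 * pages reach their final on-disk location as well.
	 */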
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}
/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */

static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;
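	/*
	 * Stuffed data lives in the dinode block itself, immediately after
	 * the on-disk inode header; copy it into the page and zero the
	 * remainder.
	 */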
	kaddr = kmap_atomic(page);
	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}
/**
 * __gfs2_readpage - readpage
 * @file: The file to read a page for
 * @page: The page to read
 *
 * This is the core of gfs2's readpage. It's used by the internal file
 * reading code as in that case we already hold the glock. It's also
 * called by gfs2_readpage() once the required lock has been granted.
 */

static int __gfs2_readpage(void *file, struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(page->mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	int error;

	if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, page);
		unlock_page(page);
	} else {
		error = mpage_readpage(page, gfs2_block_map);
	}

	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		return -EIO;

	return error;
}
/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	unlock_page(page);
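	/*
	 * The glock must be taken before the page lock, so drop the page
	 * lock first and re-take it once the glock is held. If the page
	 * was truncated or brought uptodate in the meantime, returning
	 * AOP_TRUNCATED_PAGE tells the caller to retry the operation.
	 */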
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}
/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos / PAGE_SIZE;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

	do {
		amt = size - copied;
		if (offset + amt > PAGE_SIZE)	/* clamp to the end of this page */
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while(copied < size);
	(*pos) += size;
	return size;
}
/**
 * gfs2_readpages - Read a bunch of pages at once
 * @file: The file to read from
 * @mapping: Address space info
 * @pages: List of pages to read
 * @nr_pages: Number of pages to read
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readpages() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static int gfs2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder gh;
	int ret;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (unlikely(ret))
		goto out_uninit;
	if (!gfs2_is_stuffed(ip))
		ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
		ret = -EIO;
	return ret;
}
/**
 * gfs2_write_begin - Begin to write to a file
 * @file: The file to write to
 * @mapping: The mapping in which to write
 * @pos: The file offset at which to start writing
 * @len: Length of the write
 * @flags: Various flags
 * @pagep: Pointer to return the page
 * @fsdata: Pointer to return fs data (unused by GFS2)
 *
 * Returns: errno
 */

static int gfs2_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	unsigned requested = 0;
	int alloc_required;
	int error = 0;
	pgoff_t index = pos >> PAGE_SHIFT;
	unsigned from = pos & (PAGE_SIZE - 1);
	struct page *page;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;
	if (&ip->i_inode == sdp->sd_rindex) {
		error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &m_ip->i_gh);
		if (unlikely(error)) {
			gfs2_glock_dq(&ip->i_gh);
			goto out_uninit;
		}
	}

	alloc_required = gfs2_write_alloc_required(ip, pos, len);

	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);

	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		requested = data_blocks + ind_blocks;
		ap.target = requested;
		error = gfs2_quota_lock_check(ip, &ap);
		if (error)
			goto out_unlock;

		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_qunlock;
	}
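	/*
	 * Size the transaction: the dinode itself, any indirect blocks,
	 * the data blocks when they are journaled (jdata), statfs and
	 * quota updates when allocating, and the rgrp bitmap blocks for
	 * an in-place reservation.
	 */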
	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks)
		rblocks += RES_STATFS + RES_QUOTA;
	if (&ip->i_inode == sdp->sd_rindex)
		rblocks += 2 * RES_STATFS;
	if (alloc_required)
		rblocks += gfs2_rg_blocks(ip, requested);

	error = gfs2_trans_begin(sdp, rblocks,
				 PAGE_SIZE/sdp->sd_sb.sb_bsize);
	if (error)
		goto out_trans_fail;

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;

	if (gfs2_is_stuffed(ip)) {
		error = 0;
		if (pos + len > sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode)) {
			error = gfs2_unstuff_dinode(ip, page);
			if (error == 0)
				goto prepare_write;
		} else if (!PageUptodate(page)) {
			error = stuffed_readpage(ip, page);
		}
		goto out;
	}

prepare_write:
	error = __block_write_begin(page, from, len, gfs2_block_map);
out:
	if (error == 0)
		return 0;

	unlock_page(page);
	put_page(page);

	gfs2_trans_end(sdp);
	if (pos + len > ip->i_inode.i_size)
		gfs2_trim_blocks(&ip->i_inode);
	goto out_trans_fail;

out_endtrans:
	gfs2_trans_end(sdp);
out_trans_fail:
	if (alloc_required) {
		gfs2_inplace_release(ip);
out_qunlock:
		gfs2_quota_unlock(ip);
	}
out_unlock:
	if (&ip->i_inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}
/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
static void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh, *l_bh;
	u64 fs_total, new_free;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		return;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
		goto out;
	update_statfs(sdp, m_bh, l_bh);
	brelse(l_bh);
out:
	brelse(m_bh);
}
/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @len: The length of the write
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: errno
 */
static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
				  loff_t pos, unsigned len, unsigned copied,
				  struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	u64 to = pos + copied;
	void *kaddr;
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	unlock_page(page);
	put_page(page);

	if (copied) {
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
	gfs2_trans_end(sdp);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return copied;
}
/**
 * gfs2_write_end
 * @file: The file to write to
 * @mapping: The address space to write to
 * @pos: The file position
 * @len: The length of the data
 * @copied: How much was actually copied by the VFS
 * @page: The page that has been written
 * @fsdata: The fsdata (unused in GFS2)
 *
 * The main write_end function for GFS2. We have a separate one for
 * stuffed files as they are slightly different, otherwise we just
 * put our locking around the VFS provided functions.
 *
 * Returns: errno
 */

static int gfs2_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct buffer_head *dibh;
	unsigned int from = pos & (PAGE_SIZE - 1);
	unsigned int to = from + len;
	int ret;
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!tr);
	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);

	ret = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(ret)) {
		unlock_page(page);
		put_page(page);
		goto failed;
	}

	if (gfs2_is_stuffed(ip))
		return gfs2_stuffed_write_end(inode, dibh, pos, len, copied, page);

	if (!gfs2_is_writeback(ip))
		gfs2_page_add_databufs(ip, page, from, to);

	ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
	if (tr->tr_num_buf_new)
		__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	else
		gfs2_trans_add_meta(ip->i_gl, dibh);

	if (inode == sdp->sd_rindex) {
		adjust_fs_space(inode);
		sdp->sd_rindex_uptodate = 0;
	}

	brelse(dibh);
failed:
	gfs2_trans_end(sdp);
	gfs2_inplace_release(ip);
	if (ip->i_qadata && ip->i_qadata->qa_qd_num)
		gfs2_quota_unlock(ip);
	if (inode == sdp->sd_rindex) {
		gfs2_glock_dq(&m_ip->i_gh);
		gfs2_holder_uninit(&m_ip->i_gh);
	}
	gfs2_glock_dq(&ip->i_gh);
	gfs2_holder_uninit(&ip->i_gh);
	return ret;
}
/**
 * gfs2_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */
static int gfs2_set_page_dirty(struct page *page)
{
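	/*
	 * PageChecked marks the page so that __gfs2_jdata_writepage knows
	 * its buffers still need to be added to the current transaction
	 * before the page is written.
	 */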
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}
/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;
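	/*
	 * Only a full-page invalidate may clear PageChecked and release
	 * the page: after a partial invalidate the remaining buffers may
	 * still need to be journaled when the page is next written.
	 */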
	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}

/**
 * gfs2_ok_for_dio - check that dio is valid on this file
 * @ip: The inode
 * @offset: The offset at which we are reading or writing
 *
 * Returns: 0 (to ignore the i/o request and thus fall back to buffered i/o)
 *          1 (to accept the i/o request)
 */
static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset)
{
	/*
	 * Should we return an error here? I can't see that O_DIRECT for
	 * a stuffed file makes any sense. For now we'll silently fall
	 * back to buffered I/O
	 */
	if (gfs2_is_stuffed(ip))
		return 0;

	if (offset >= i_size_read(&ip->i_inode))
		return 0;

	return 1;
}
static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct address_space *mapping = inode->i_mapping;
	struct gfs2_inode *ip = GFS2_I(inode);
	loff_t offset = iocb->ki_pos;
	struct gfs2_holder gh;
	int rv;

	/*
	 * Deferred lock, even if it's a write, since we do no allocation on
	 * this path. All we need to change is atime, and this lock mode
	 * ensures that other nodes have flushed their buffered read caches
	 * (i.e. their page cache entries for this inode). We do not,
	 * unfortunately, have the option of only flushing a range like the
	 * VFS does.
	 */
	gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
	rv = gfs2_glock_nq(&gh);
	if (rv)
		goto out_uninit;
	rv = gfs2_ok_for_dio(ip, offset);
	if (rv != 1)
		goto out; /* dio not valid, fall back to buffered i/o */

	/*
	 * Now since we are holding a deferred (CW) lock at this point, you
	 * might be wondering why this is ever needed. There is a case however
	 * where we've granted a deferred local lock against a cached exclusive
	 * glock. That is ok provided all granted local locks are deferred, but
	 * it also means that it is possible to encounter pages which are
	 * cached and possibly also mapped. So here we check for that and sort
	 * them out ahead of the dio. The glock state machine will take care of
	 * everything else.
	 *
	 * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
	 * the first place, mapping->nrpages will always be zero.
	 */
	if (mapping->nrpages) {
		loff_t lstart = offset & ~(PAGE_SIZE - 1);
		loff_t len = iov_iter_count(iter);
		loff_t end = PAGE_ALIGN(offset + len) - 1;

		rv = 0;
		if (len == 0)
			goto out;
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
		rv = filemap_write_and_wait_range(mapping, lstart, end);
		if (rv)
			goto out;
		if (iov_iter_rw(iter) == WRITE)
			truncate_inode_pages_range(mapping, lstart, end);
	}

	rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				  gfs2_get_block_direct, NULL, NULL, 0);
out:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return rv;
}
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Call try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: 0
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = gfs2_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.write_begin = gfs2_write_begin,
	.write_end = gfs2_write_end,
	.set_page_dirty = gfs2_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
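/*
 * Note that gfs2_jdata_aops provides no ->direct_IO: journaled data must
 * pass through the log, so O_DIRECT is not supported on jdata files.
 */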
void gfs2_set_aops(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_writeback(ip))
		inode->i_mapping->a_ops = &gfs2_writeback_aops;
	else if (gfs2_is_ordered(ip))
		inode->i_mapping->a_ops = &gfs2_ordered_aops;
	else if (gfs2_is_jdata(ip))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		BUG();
}