xfs_bmap_util.c

/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap_btree.h"

/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block. We have to treat it
 * differently based on whether the file is a real time file or not, because
 * the bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
	return (XFS_IS_REALTIME_INODE(ip) ?
		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
}
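
/*
 * Conversion sketch (illustrative, not part of the code): for a realtime
 * inode the fsb is already a linear block number, so XFS_FSB_TO_BB() is a
 * plain shift to 512-byte basic blocks; for a data-device inode the fsb
 * encodes an (AG number, AG block) pair that XFS_FSB_TO_DADDR() decomposes
 * first. Assuming a 4k block size (sb_blocklog == 12):
 *
 *	realtime:	daddr = fsb << (12 - 9);
 *	data device:	daddr = XFS_AGB_TO_DADDR(mp,
 *					XFS_FSB_TO_AGNO(mp, fsb),
 *					XFS_FSB_TO_AGBNO(mp, fsb));
 */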

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
	struct xfs_inode	*ip,
	xfs_fsblock_t		start_fsb,
	xfs_off_t		count_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_daddr_t		sector = xfs_fsb_to_db(ip, start_fsb);
	sector_t		block = XFS_BB_TO_FSBT(mp, sector);

	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
		block << (mp->m_super->s_blocksize_bits - 9),
		count_fsb << (mp->m_super->s_blocksize_bits - 9),
		GFP_NOFS, true);
}
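
/*
 * Unit sketch (illustrative): blkdev_issue_zeroout() takes 512-byte
 * sectors, while @block and @count_fsb above are filesystem blocks, so
 * both are shifted by (s_blocksize_bits - 9). With 4k blocks each fs
 * block is 1 << (12 - 9) == 8 sectors, e.g.:
 *
 *	fsb 100, 3 blocks  ->  sector 800, 24 sectors
 */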

int
xfs_bmap_rtalloc(
	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
{
	xfs_alloctype_t		atype = 0;	/* type for allocation routines */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* mount point structure */
	xfs_extlen_t		prod = 0;	/* product factor for allocators */
	xfs_extlen_t		ralen = 0;	/* realtime allocation length */
	xfs_extlen_t		align;		/* minimum allocation alignment */
	xfs_rtblock_t		rtb;

	mp = ap->ip->i_mount;
	align = xfs_get_extsz_hint(ap->ip);
	prod = align / mp->m_sb.sb_rextsize;
	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
					align, 1, ap->eof, 0,
					ap->conv, &ap->offset, &ap->length);
	if (error)
		return error;
	ASSERT(ap->length);
	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

	/*
	 * If the offset & length are not perfectly aligned
	 * then kill prod, it will just get us in trouble.
	 */
	if (do_mod(ap->offset, align) || ap->length % align)
		prod = 1;
	/*
	 * Set ralen to be the actual requested length in rtextents.
	 */
	ralen = ap->length / mp->m_sb.sb_rextsize;
	/*
	 * If the old value was close enough to MAXEXTLEN that
	 * we rounded up to it, cut it back so it's valid again.
	 * Note that if it's a really large request (bigger than
	 * MAXEXTLEN), we don't hear about that number, and can't
	 * adjust the starting point to match it.
	 */
	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;

	/*
	 * Lock out modifications to both the RT bitmap and summary inodes
	 */
	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

	/*
	 * If it's an allocation to an empty file at offset 0,
	 * pick an extent that will space things out in the rt area.
	 */
	if (ap->eof && ap->offset == 0) {
		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */

		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
		if (error)
			return error;
		ap->blkno = rtx * mp->m_sb.sb_rextsize;
	} else {
		ap->blkno = 0;
	}

	xfs_bmap_adjacent(ap);

	/*
	 * Realtime allocation, done through xfs_rtallocate_extent.
	 */
	atype = ap->blkno == 0 ?  XFS_ALLOCTYPE_ANY_AG : XFS_ALLOCTYPE_NEAR_BNO;
	do_div(ap->blkno, mp->m_sb.sb_rextsize);
	rtb = ap->blkno;
	ap->length = ralen;
	if ((error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
				&ralen, atype, ap->wasdel, prod, &rtb)))
		return error;
	if (rtb == NULLFSBLOCK && prod > 1 &&
	    (error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1,
					   ap->length, &ralen, atype,
					   ap->wasdel, 1, &rtb)))
		return error;
	ap->blkno = rtb;
	if (ap->blkno != NULLFSBLOCK) {
		ap->blkno *= mp->m_sb.sb_rextsize;
		ralen *= mp->m_sb.sb_rextsize;
		ap->length = ralen;
		ap->ip->i_d.di_nblocks += ralen;
		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
		if (ap->wasdel)
			ap->ip->i_delayed_blks -= ralen;
		/*
		 * Adjust the disk quota also. This was reserved
		 * earlier.
		 */
		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);

		/* Zero the extent if we were asked to do so */
		if (ap->userdata & XFS_ALLOC_USERDATA_ZERO) {
			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
			if (error)
				return error;
		}
	} else {
		ap->length = 0;
	}
	return 0;
}

/*
 * Check if the endoff is outside the last extent. If so the caller will grow
 * the allocation to a stripe unit boundary.  All offsets are considered outside
 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 */
int
xfs_bmap_eof(
	struct xfs_inode	*ip,
	xfs_fileoff_t		endoff,
	int			whichfork,
	int			*eof)
{
	struct xfs_bmbt_irec	rec;
	int			error;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
	if (error || *eof)
		return error;

	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
	return 0;
}
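
/*
 * Caller-side sketch (illustrative; not a call site in this file): an
 * allocation path that wants stripe-aligned allocations at EOF could do:
 *
 *	int	eof;
 *
 *	error = xfs_bmap_eof(ip, endoff, XFS_DATA_FORK, &eof);
 *	if (error)
 *		return error;
 *	if (eof)
 *		... grow the request to a stripe unit boundary ...
 */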

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.
 */
STATIC void
xfs_bmap_count_leaves(
	xfs_ifork_t		*ifp,
	xfs_extnum_t		idx,
	int			numrecs,
	int			*count)
{
	int		b;

	for (b = 0; b < numrecs; b++) {
		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
		*count += xfs_bmbt_get_blockcount(frp);
	}
}

/*
 * Count leaf blocks given a range of extent records originally
 * in btree format.
 */
STATIC void
xfs_bmap_disk_count_leaves(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*block,
	int			numrecs,
	int			*count)
{
	int		b;
	xfs_bmbt_rec_t	*frp;

	for (b = 1; b <= numrecs; b++) {
		frp = XFS_BMBT_REC_ADDR(mp, block, b);
		*count += xfs_bmbt_disk_get_blockcount(frp);
	}
}

/*
 * Recursively walks each level of a btree
 * to count total fsblocks in use.
 */
STATIC int				/* error */
xfs_bmap_count_tree(
	xfs_mount_t	*mp,		/* file system mount point */
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fsblock_t	blockno,	/* file system block number */
	int		levelin,	/* level in btree */
	int		*count)		/* Count of blocks */
{
	int			error;
	xfs_buf_t		*bp, *nbp;
	int			level = levelin;
	__be64			*pp;
	xfs_fsblock_t		bno = blockno;
	xfs_fsblock_t		nextbno;
	struct xfs_btree_block	*block, *nextblock;
	int			numrecs;

	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
	if (error)
		return error;
	*count += 1;
	block = XFS_BUF_TO_BLOCK(bp);

	if (--level) {
		/* Not at node above leaves, count this level of nodes */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		while (nextbno != NULLFSBLOCK) {
			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			nextblock = XFS_BUF_TO_BLOCK(nbp);
			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
			xfs_trans_brelse(tp, nbp);
		}

		/* Dive to the next level */
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		if (unlikely((error =
		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
			xfs_trans_brelse(tp, bp);
			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
					 XFS_ERRLEVEL_LOW, mp);
			return -EFSCORRUPTED;
		}
		xfs_trans_brelse(tp, bp);
	} else {
		/* count all level 1 nodes and their leaves */
		for (;;) {
			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
			numrecs = be16_to_cpu(block->bb_numrecs);
			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
			xfs_trans_brelse(tp, bp);
			if (nextbno == NULLFSBLOCK)
				break;
			bno = nextbno;
			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				return error;
			*count += 1;
			block = XFS_BUF_TO_BLOCK(bp);
		}
	}
	return 0;
}

/*
 * Count fsblocks of the given fork.
 */
static int				/* error */
xfs_bmap_count_blocks(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode */
	int			whichfork,	/* data or attr fork */
	int			*count)		/* out: count of blocks */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_ifork_t		*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
		xfs_bmap_count_leaves(ifp, 0,
			ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t),
			count);
		return 0;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	block = ifp->if_broot;
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);
	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
				 mp);
		return -EFSCORRUPTED;
	}

	return 0;
}
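
/*
 * Usage sketch (mirrors the calls in xfs_swap_extents() below): count the
 * blocks backing an inode's attribute fork so that di_nblocks can be fixed
 * up after the data forks are swapped:
 *
 *	int	aforkblks = 0;
 *
 *	error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
 *	if (error)
 *		goto out_trans_cancel;
 */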

/*
 * returns 1 for success, 0 if we failed to map the extent.
 */
STATIC int
xfs_getbmapx_fix_eof_hole(
	xfs_inode_t		*ip,		/* xfs incore inode pointer */
	struct getbmapx		*out,		/* output structure */
	int			prealloced,	/* this is a file with
						 * preallocated data space */
	__int64_t		end,		/* last block requested */
	xfs_fsblock_t		startblock)
{
	__int64_t		fixlen;
	xfs_mount_t		*mp;		/* file system mount point */
	xfs_ifork_t		*ifp;		/* inode fork pointer */
	xfs_extnum_t		lastx;		/* last extent pointer */
	xfs_fileoff_t		fileblock;

	if (startblock == HOLESTARTBLOCK) {
		mp = ip->i_mount;
		out->bmv_block = -1;
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
		fixlen -= out->bmv_offset;
		if (prealloced && out->bmv_offset + out->bmv_length == end) {
			/* Came to hole at EOF. Trim it. */
			if (fixlen <= 0)
				return 0;
			out->bmv_length = fixlen;
		}
	} else {
		if (startblock == DELAYSTARTBLOCK)
			out->bmv_block = -2;
		else
			out->bmv_block = xfs_fsb_to_db(ip, startblock);
		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
		ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
		if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
		   (lastx == (ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))-1))
			out->bmv_oflags |= BMV_OF_LAST;
	}

	return 1;
}

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int						/* error code */
xfs_getbmap(
	xfs_inode_t		*ip,
	struct getbmapx		*bmv,		/* user bmap structure */
	xfs_bmap_format_t	formatter,	/* format to user */
	void			*arg)		/* formatter arg */
{
	__int64_t		bmvend;		/* last block requested */
	int			error = 0;	/* return value */
	__int64_t		fixlen;		/* length for -1 case */
	int			i;		/* extent number */
	int			lock;		/* lock state */
	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
	xfs_mount_t		*mp;		/* file system mount point */
	int			nex;		/* # of user extents can do */
	int			nexleft;	/* # of user extents left */
	int			subnex;		/* # of bmapi's can do */
	int			nmap;		/* number of map entries */
	struct getbmapx		*out;		/* output structure */
	int			whichfork;	/* data or attr fork */
	int			prealloced;	/* this is a file with
						 * preallocated data space */
	int			iflags;		/* interface flags */
	int			bmapi_flags;	/* flags for xfs_bmapi */
	int			cur_ext = 0;

	mp = ip->i_mount;
	iflags = bmv->bmv_iflags;
	whichfork = iflags & BMV_IF_ATTRFORK ? XFS_ATTR_FORK : XFS_DATA_FORK;

	if (whichfork == XFS_ATTR_FORK) {
		if (XFS_IFORK_Q(ip)) {
			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
				return -EINVAL;
		} else if (unlikely(
			   ip->i_d.di_aformat != 0 &&
			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
					 ip->i_mount);
			return -EFSCORRUPTED;
		}

		prealloced = 0;
		fixlen = 1LL << 32;
	} else {
		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
			return -EINVAL;

		if (xfs_get_extsz_hint(ip) ||
		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
			prealloced = 1;
			fixlen = mp->m_super->s_maxbytes;
		} else {
			prealloced = 0;
			fixlen = XFS_ISIZE(ip);
		}
	}

	if (bmv->bmv_length == -1) {
		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
		bmv->bmv_length =
			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
	} else if (bmv->bmv_length == 0) {
		bmv->bmv_entries = 0;
		return 0;
	} else if (bmv->bmv_length < 0) {
		return -EINVAL;
	}

	nex = bmv->bmv_count - 1;
	if (nex <= 0)
		return -EINVAL;
	bmvend = bmv->bmv_offset + bmv->bmv_length;

	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
		return -ENOMEM;
	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
	if (!out)
		return -ENOMEM;

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	if (whichfork == XFS_DATA_FORK) {
		if (!(iflags & BMV_IF_DELALLOC) &&
		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
			if (error)
				goto out_unlock_iolock;

			/*
			 * Even after flushing the inode, there can still be
			 * delalloc blocks on the inode beyond EOF due to
			 * speculative preallocation.  These are not removed
			 * until the release function is called or the inode
			 * is inactivated.  Hence we cannot assert here that
			 * ip->i_delayed_blks == 0.
			 */
		}

		lock = xfs_ilock_data_map_shared(ip);
	} else {
		lock = xfs_ilock_attr_map_shared(ip);
	}

	/*
	 * Don't let nex be bigger than the number of extents
	 * we can have assuming alternating holes and real extents.
	 */
	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;

	bmapi_flags = xfs_bmapi_aflag(whichfork);
	if (!(iflags & BMV_IF_PREALLOC))
		bmapi_flags |= XFS_BMAPI_IGSTATE;

	/*
	 * Allocate enough space to handle "subnex" maps at a time.
	 */
	error = -ENOMEM;
	subnex = 16;
	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
	if (!map)
		goto out_unlock_ilock;

	bmv->bmv_entries = 0;

	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
		error = 0;
		goto out_free_map;
	}

	nexleft = nex;

	do {
		nmap = (nexleft > subnex) ? subnex : nexleft;
		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
				       map, &nmap, bmapi_flags);
		if (error)
			goto out_free_map;
		ASSERT(nmap <= subnex);

		for (i = 0; i < nmap && nexleft && bmv->bmv_length; i++) {
			out[cur_ext].bmv_oflags = 0;
			if (map[i].br_state == XFS_EXT_UNWRITTEN)
				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
			else if (map[i].br_startblock == DELAYSTARTBLOCK)
				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
			out[cur_ext].bmv_offset =
				XFS_FSB_TO_BB(mp, map[i].br_startoff);
			out[cur_ext].bmv_length =
				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
			out[cur_ext].bmv_unused1 = 0;
			out[cur_ext].bmv_unused2 = 0;

			/*
			 * delayed allocation extents that start beyond EOF can
			 * occur due to speculative EOF allocation when the
			 * delalloc extent is larger than the largest freespace
			 * extent at conversion time.  These extents cannot be
			 * converted by data writeback, so can exist here even
			 * if we are not supposed to be finding delalloc
			 * extents.
			 */
			if (map[i].br_startblock == DELAYSTARTBLOCK &&
			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
				ASSERT((iflags & BMV_IF_DELALLOC) != 0);

			if (map[i].br_startblock == HOLESTARTBLOCK &&
			    whichfork == XFS_ATTR_FORK) {
				/* came to the end of attribute fork */
				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
				goto out_free_map;
			}

			if (!xfs_getbmapx_fix_eof_hole(ip, &out[cur_ext],
					prealloced, bmvend,
					map[i].br_startblock))
				goto out_free_map;

			bmv->bmv_offset =
				out[cur_ext].bmv_offset +
				out[cur_ext].bmv_length;
			bmv->bmv_length =
				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);

			/*
			 * In case we don't want to return the hole,
			 * don't increase cur_ext so that we can reuse
			 * it in the next loop.
			 */
			if ((iflags & BMV_IF_NO_HOLES) &&
			    map[i].br_startblock == HOLESTARTBLOCK) {
				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
				continue;
			}

			nexleft--;
			bmv->bmv_entries++;
			cur_ext++;
		}
	} while (nmap && nexleft && bmv->bmv_length);

 out_free_map:
	kmem_free(map);
 out_unlock_ilock:
	xfs_iunlock(ip, lock);
 out_unlock_iolock:
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	for (i = 0; i < cur_ext; i++) {
		int full = 0;	/* user array is full */

		/* format results & advance arg */
		error = formatter(&arg, &out[i], &full);
		if (error || full)
			break;
	}

	kmem_free(out);
	return error;
}
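
/*
 * Formatter sketch (illustrative; modelled on the GETBMAP ioctl path, not
 * defined in this file): a formatter copies one record to the user buffer
 * tracked through *ap and returns nonzero (or sets *full) to stop the loop:
 *
 *	STATIC int
 *	example_getbmap_format(void **ap, struct getbmapx *bmv, int *full)
 *	{
 *		struct getbmap __user *base = (struct getbmap __user *)*ap;
 *
 *		// copy only the getbmap portion (not getbmapx)
 *		if (copy_to_user(base, bmv, sizeof(struct getbmap)))
 *			return -EFAULT;
 *
 *		*ap += sizeof(struct getbmap);
 *		return 0;
 *	}
 */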

/*
 * dead simple method of punching delayed allocation blocks from a range in
 * the inode. Walks a block at a time so will be slow, but is only executed in
 * rare error cases so the overhead is not critical. This will always punch out
 * both the start and end blocks, even if the ranges only partially overlap
 * them, so it is up to the caller to ensure that partial blocks are not
 * passed in.
 */
int
xfs_bmap_punch_delalloc_range(
	struct xfs_inode	*ip,
	xfs_fileoff_t		start_fsb,
	xfs_fileoff_t		length)
{
	xfs_fileoff_t		remaining = length;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	do {
		int		done;
		xfs_bmbt_irec_t	imap;
		int		nimaps = 1;
		xfs_fsblock_t	firstblock;
		struct xfs_defer_ops dfops;

		/*
		 * Map the range first and check that it is a delalloc extent
		 * before trying to unmap the range. Otherwise we will be
		 * trying to remove a real extent (which requires a
		 * transaction) or a hole, which is probably a bad idea...
		 */
		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
				       XFS_BMAPI_ENTIRE);

		if (error) {
			/* something screwed, just bail */
			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
				xfs_alert(ip->i_mount,
			"Failed delalloc mapping lookup ino %lld fsb %lld.",
						ip->i_ino, start_fsb);
			}
			break;
		}
		if (!nimaps) {
			/* nothing there */
			goto next_block;
		}
		if (imap.br_startblock != DELAYSTARTBLOCK) {
			/* been converted, ignore */
			goto next_block;
		}
		WARN_ON(imap.br_blockcount == 0);

		/*
		 * Note: while we initialise the firstblock/dfops pair, they
		 * should never be used because blocks should never be
		 * allocated or freed for a delalloc extent and hence we don't
		 * need to cancel or finish them after the xfs_bunmapi() call.
		 */
		xfs_defer_init(&dfops, &firstblock);
		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
					&dfops, &done);
		if (error)
			break;

		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
next_block:
		start_fsb++;
		remaining--;
	} while (remaining > 0);

	return error;
}
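
/*
 * Caller-side sketch (loosely modelled on the writeback error path;
 * illustrative only): punch out the delalloc blocks backing one failed
 * page, with the ilock held as the ASSERT above requires:
 *
 *	xfs_fileoff_t	start_fsb = XFS_B_TO_FSBT(mp, page_offset(page));
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
 *			PAGE_SIZE / mp->m_sb.sb_blocksize);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */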

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks. The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
{
	/* prealloc/delalloc exists only on regular files */
	if (!S_ISREG(VFS_I(ip)->i_mode))
		return false;

	/*
	 * Zero sized files with no cached pages and no delalloc blocks will
	 * not have speculative prealloc/delalloc blocks to remove.
	 */
	if (VFS_I(ip)->i_size == 0 &&
	    VFS_I(ip)->i_mapping->nrpages == 0 &&
	    ip->i_delayed_blks == 0)
		return false;

	/* If we haven't read in the extent list, then don't do it now. */
	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
		return false;

	/*
	 * Do not free real preallocated or append-only files unless the file
	 * has delalloc blocks and we are forced to remove them.
	 */
	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
		if (!force || ip->i_delayed_blks == 0)
			return false;

	return true;
}
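
/*
 * Usage sketch (this pairing appears in xfs_shift_file_space() below):
 * trim post-EOF preallocation before an operation that would otherwise
 * shift it into the visible part of the file:
 *
 *	if (xfs_can_free_eofblocks(ip, true)) {
 *		error = xfs_free_eofblocks(mp, ip, false);
 *		if (error)
 *			return error;
 *	}
 */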

/*
 * This is called by xfs_inactive to free any blocks beyond eof
 * when the link count isn't zero and by xfs_dm_punch_hole() when
 * punching a hole to EOF.
 */
int
xfs_free_eofblocks(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	bool		need_iolock)
{
	xfs_trans_t	*tp;
	int		error;
	xfs_fileoff_t	end_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_filblks_t	map_len;
	int		nimaps;
	xfs_bmbt_irec_t	imap;

	/*
	 * Figure out if there are any blocks beyond the end
	 * of the file.  If not, then there is nothing to do.
	 */
	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	if (last_fsb <= end_fsb)
		return 0;
	map_len = last_fsb - end_fsb;

	nimaps = 1;
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!error && (nimaps != 0) &&
	    (imap.br_startblock != HOLESTARTBLOCK ||
	     ip->i_delayed_blks)) {
		/*
		 * Attach the dquots to the inode up front.
		 */
		error = xfs_qm_dqattach(ip, 0);
		if (error)
			return error;

		/*
		 * There are blocks after the end of file.
		 * Free them up now by truncating the file to
		 * its current size.
		 */
		if (need_iolock) {
			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
				return -EAGAIN;
		}

		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
				&tp);
		if (error) {
			ASSERT(XFS_FORCED_SHUTDOWN(mp));
			if (need_iolock)
				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
			return error;
		}

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_trans_ijoin(tp, ip, 0);

		/*
		 * Do not update the on-disk file size.  If we update the
		 * on-disk file size and then the system crashes before the
		 * contents of the file are flushed to disk then the files
		 * may be full of holes (ie NULL files bug).
		 */
		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
					      XFS_ISIZE(ip));
		if (error) {
			/*
			 * If we get an error at this point we simply don't
			 * bother truncating the file.
			 */
			xfs_trans_cancel(tp);
		} else {
			error = xfs_trans_commit(tp);
			if (!error)
				xfs_inode_clear_eofblocks_tag(ip);
		}

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (need_iolock)
			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	}
	return error;
}

int
xfs_alloc_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	int			alloc_type)
{
	xfs_mount_t		*mp = ip->i_mount;
	xfs_off_t		count;
	xfs_filblks_t		allocated_fsb;
	xfs_filblks_t		allocatesize_fsb;
	xfs_extlen_t		extsz, temp;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fsblock_t		firstfsb;
	int			nimaps;
	int			quota_flag;
	int			rt;
	xfs_trans_t		*tp;
	xfs_bmbt_irec_t		imaps[1], *imapp;
	struct xfs_defer_ops	dfops;
	uint			qblocks, resblks, resrtextents;
	int			error;

	trace_xfs_alloc_file_space(ip);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)
		return -EINVAL;

	rt = XFS_IS_REALTIME_INODE(ip);
	extsz = xfs_get_extsz_hint(ip);

	count = len;
	imapp = &imaps[0];
	nimaps = 1;
	startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
	allocatesize_fsb = XFS_B_TO_FSB(mp, count);

	/*
	 * Allocate file space until done or until there is an error
	 */
	while (allocatesize_fsb && !error) {
		xfs_fileoff_t	s, e;

		/*
		 * Determine space reservations for data/realtime.
		 */
		if (unlikely(extsz)) {
			s = startoffset_fsb;
			do_div(s, extsz);
			s *= extsz;
			e = startoffset_fsb + allocatesize_fsb;
			if ((temp = do_mod(startoffset_fsb, extsz)))
				e += temp;
			if ((temp = do_mod(e, extsz)))
				e += extsz - temp;
		} else {
			s = 0;
			e = allocatesize_fsb;
		}

		/*
		 * The transaction reservation is limited to a 32-bit block
		 * count, hence we need to limit the number of blocks we are
		 * trying to reserve to avoid an overflow. We can't allocate
		 * more than @nimaps extents, and an extent is limited on disk
		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
		 */
		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
		if (unlikely(rt)) {
			resrtextents = qblocks = resblks;
			resrtextents /= mp->m_sb.sb_rextsize;
			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
			quota_flag = XFS_QMOPT_RES_RTBLKS;
		} else {
			resrtextents = 0;
			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
			quota_flag = XFS_QMOPT_RES_REGBLKS;
		}

		/*
		 * Allocate and setup the transaction.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
				resrtextents, 0, &tp);

		/*
		 * Check for running out of space
		 */
		if (error) {
			/*
			 * Free the transaction structure.
			 */
			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
			break;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
						      0, quota_flag);
		if (error)
			goto error1;

		xfs_trans_ijoin(tp, ip, 0);

		xfs_defer_init(&dfops, &firstfsb);
		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
					allocatesize_fsb, alloc_type, &firstfsb,
					resblks, imapp, &nimaps, &dfops);
		if (error)
			goto error0;

		/*
		 * Complete the transaction
		 */
		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto error0;

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			break;

		allocated_fsb = imapp->br_blockcount;

		if (nimaps == 0) {
			error = -ENOSPC;
			break;
		}

		startoffset_fsb += allocated_fsb;
		allocatesize_fsb -= allocated_fsb;
	}

	return error;

error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
	xfs_defer_cancel(&dfops);
	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);

error1:	/* Just cancel transaction */
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
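
/*
 * Worked example for the extent-size rounding above (illustrative
 * numbers): with extsz = 16, startoffset_fsb = 10 and
 * allocatesize_fsb = 20, one loop iteration reserves for the widened
 * range:
 *
 *	s = round_down(10, 16)          =  0
 *	e = 10 + 20 + (10 % 16)         = 40, then rounded up to 48
 *
 * so resblks is derived from 48 fs blocks, before the MAXEXTLEN clamp
 * and the XFS_DIOSTRAT_SPACE_RES() conversion in the non-rt case.
 */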

static int
xfs_unmap_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		startoffset_fsb,
	xfs_filblks_t		len_fsb,
	int			*done)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		firstfsb;
	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error) {
		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto out_trans_cancel;

	xfs_trans_ijoin(tp, ip, 0);

	xfs_defer_init(&dfops, &firstfsb);
	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
			&dfops, done);
	if (error)
		goto out_bmap_cancel;

	error = xfs_defer_finish(&tp, &dfops, ip);
	if (error)
		goto out_bmap_cancel;

	error = xfs_trans_commit(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

static int
xfs_adjust_extent_unmap_boundaries(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*startoffset_fsb,
	xfs_fileoff_t		*endoffset_fsb)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	int			nimap, error;
	xfs_extlen_t		mod = 0;

	nimap = 1;
	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		xfs_daddr_t	block;

		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		block = imap.br_startblock;
		mod = do_div(block, mp->m_sb.sb_rextsize);
		if (mod)
			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
	}

	nimap = 1;
	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
	if (error)
		return error;

	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
		mod++;
		if (mod && mod != mp->m_sb.sb_rextsize)
			*endoffset_fsb -= mod;
	}

	return 0;
}

static int
xfs_flush_unmap_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	xfs_off_t		rounding, start, end;
	int			error;

	/* wait for the completion of any pending DIOs */
	inode_dio_wait(inode);

	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
	start = round_down(offset, rounding);
	end = round_up(offset + len, rounding) - 1;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;
	truncate_pagecache_range(inode, start, end);
	return 0;
}
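
/*
 * Rounding sketch (illustrative; 4k blocks and 4k pages assumed): for
 * offset = 6000, len = 3000 the flushed and invalidated byte range is
 * widened to whole blocks/pages:
 *
 *	rounding = max(4096, PAGE_SIZE)     = 4096
 *	start    = round_down(6000, 4096)   = 4096
 *	end      = round_up(9000, 4096) - 1 = 12287
 */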

int
xfs_free_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		startoffset_fsb;
	xfs_fileoff_t		endoffset_fsb;
	int			done = 0, error;

	trace_xfs_free_file_space(ip);

	error = xfs_qm_dqattach(ip, 0);
	if (error)
		return error;

	if (len <= 0)	/* if nothing being freed */
		return 0;

	error = xfs_flush_unmap_range(ip, offset, len);
	if (error)
		return error;

	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

	/*
	 * Need to zero the stuff we're not freeing, on disk. If it's a RT file
	 * and we can't use unwritten extents then we actually need to ensure
	 * to zero the whole extent, otherwise we just need to take care of the
	 * block boundaries, and xfs_bunmapi will handle the rest.
	 */
	if (XFS_IS_REALTIME_INODE(ip) &&
	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
				&endoffset_fsb);
		if (error)
			return error;
	}

	if (endoffset_fsb > startoffset_fsb) {
		while (!done) {
			error = xfs_unmap_extent(ip, startoffset_fsb,
					endoffset_fsb - startoffset_fsb, &done);
			if (error)
				return error;
		}
	}

	/*
	 * Now that we've unmapped all full blocks we'll have to zero out any
	 * partial block at the beginning and/or end.  xfs_zero_range is smart
	 * enough to skip any holes, including those we just created.
	 */
	return xfs_zero_range(ip, offset, len, NULL);
}

/*
 * Preallocate and zero a range of a file. This mechanism has the allocation
 * semantics of fallocate and in addition converts data in the range to zeroes.
 */
int
xfs_zero_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			blksize;
	int			error;

	trace_xfs_zero_file_space(ip);

	blksize = 1 << mp->m_sb.sb_blocklog;

	/*
	 * Punch a hole and prealloc the range. We use hole punch rather than
	 * unwritten extent conversion for two reasons:
	 *
	 * 1.) Hole punch handles partial block zeroing for us.
	 *
	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
	 * by virtue of the hole punch.
	 */
	error = xfs_free_file_space(ip, offset, len);
	if (error)
		goto out;

	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
				     round_up(offset + len, blksize) -
				     round_down(offset, blksize),
				     XFS_BMAPI_PREALLOC);
out:
	return error;
}
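
/*
 * Caller-side sketch (how the fallocate path is expected to reach this
 * helper; illustrative only):
 *
 *	if (mode & FALLOC_FL_ZERO_RANGE)
 *		error = xfs_zero_file_space(ip, offset, len);
 */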

/*
 * @next_fsb will keep track of the extent currently undergoing shift.
 * @stop_fsb will keep track of the extent at which we have to stop.
 * If we are shifting left, we will start with block (offset + len) and
 * shift each extent till the last extent.
 * If we are shifting right, we will start with the last extent inside
 * file space and continue until we reach the block corresponding to offset.
 */
static int
xfs_shift_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len,
	enum shift_direction	direction)
{
	int			done = 0;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;
	struct xfs_defer_ops	dfops;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		stop_fsb;
	xfs_fileoff_t		next_fsb;
	xfs_fileoff_t		shift_fsb;

	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);

	if (direction == SHIFT_LEFT) {
		next_fsb = XFS_B_TO_FSB(mp, offset + len);
		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
	} else {
		/*
		 * If right shift, delegate the work of initialization of
		 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
		 */
		next_fsb = NULLFSBLOCK;
		stop_fsb = XFS_B_TO_FSB(mp, offset);
	}

	shift_fsb = XFS_B_TO_FSB(mp, len);

	/*
	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
	 * into the accessible region of the file.
	 */
	if (xfs_can_free_eofblocks(ip, true)) {
		error = xfs_free_eofblocks(mp, ip, false);
		if (error)
			return error;
	}

	/*
	 * Writeback and invalidate cache for the remainder of the file as we're
	 * about to shift down every extent from offset to EOF.
	 */
	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
					     offset, -1);
	if (error)
		return error;
	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					offset >> PAGE_SHIFT, -1);
	if (error)
		return error;

	/*
	 * The extent shifting code works on extent granularity. So, if
	 * stop_fsb is not the starting block of an extent, we need to split
	 * the extent at stop_fsb.
	 */
	if (direction == SHIFT_RIGHT) {
		error = xfs_bmap_split_extent(ip, stop_fsb);
		if (error)
			return error;
	}

	while (!error && !done) {
		/*
		 * We need to reserve a permanent block for the transaction:
		 * after shifting an extent into a hole we may find that
		 * adjacent extents can be merged, which can free a block
		 * during the record update.
		 */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
					XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0,
					&tp);
		if (error)
			break;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
				ip->i_gdquot, ip->i_pdquot,
				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
				XFS_QMOPT_RES_REGBLKS);
		if (error)
			goto out_trans_cancel;
		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

		xfs_defer_init(&dfops, &first_block);

		/*
		 * We are using the write transaction in which max 2 bmbt
		 * updates are allowed
		 */
		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
				&done, stop_fsb, &first_block, &dfops,
				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
		if (error)
			goto out_bmap_cancel;

		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto out_bmap_cancel;

		error = xfs_trans_commit(tp);
	}

	return error;

out_bmap_cancel:
	xfs_defer_cancel(&dfops);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is to free data blocks in the specified range
 *	by calling xfs_free_file_space(). It also syncs dirty data and
 *	invalidates the page cache over the region on which the collapse
 *	range is working. Extent records are then shifted to the left to
 *	cover the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	int error;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_collapse_file_space(ip);

	error = xfs_free_file_space(ip, offset, len);
	if (error)
		return error;

	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
}

/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given
 *	file. The first thing we do is to sync dirty data and invalidate the
 *	page cache over the region on which the insert range is working. We
 *	then split an extent in two at the given offset by calling
 *	xfs_bmap_split_extent, and shift all extent records lying between
 *	[offset, last allocated extent] to the right to make room for the
 *	hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	trace_xfs_insert_file_space(ip);

	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
}
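
/*
 * Caller-side sketch (illustrative): these two helpers back the
 * FALLOC_FL_COLLAPSE_RANGE and FALLOC_FL_INSERT_RANGE fallocate modes:
 *
 *	if (mode & FALLOC_FL_COLLAPSE_RANGE)
 *		error = xfs_collapse_file_space(ip, offset, len);
 *	else if (mode & FALLOC_FL_INSERT_RANGE)
 *		error = xfs_insert_file_space(ip, offset, len);
 */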

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap. This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format. Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt. Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip)	/* tmp inode */
{

	/* Should never get a local format */
	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
		return -EINVAL;

	/*
	 * If the target inode has fewer extents than the temporary inode,
	 * why did userspace call us?
	 */
	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
		return -EINVAL;

	/*
	 * If the target inode is in extent form and the temp inode is in btree
	 * form then we will end up with the target inode in the wrong format
	 * as we already know there are fewer extents in the temp inode.
	 */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		return -EINVAL;

	/* Check temp in extent form to max in target */
	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
		return -EINVAL;

	/* Check target in extent form to max in temp */
	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
		return -EINVAL;

	/*
	 * If we are in a btree format, check that the temp root block will fit
	 * in the target and that it has enough extents to be in btree format
	 * in the target.
	 *
	 * Note that we have to be careful to allow btree->extent conversions
	 * (a common defrag case) which will occur when the temp inode is in
	 * extent format...
	 */
	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(ip) &&
		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
			return -EINVAL;
	}

	/* Reciprocal target->temp btree format checks */
	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		if (XFS_IFORK_BOFF(tip) &&
		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
			return -EINVAL;
		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
			return -EINVAL;
	}

	return 0;
}
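
/*
 * Worked example (illustrative numbers): say the target ip can hold 7
 * extents inline (XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK) == 7) and the temp
 * tip is in btree format. If tip holds 8 extents the swap is fine: ip
 * ends up as a btree that genuinely needs btree format. If tip holds only
 * 6 extents (<= ip's inline maximum), the post-swap ip would carry a
 * btree root for data that belongs in extent format, so the btree checks
 * above reject the swap with -EINVAL.
 */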

static int
xfs_swap_extent_flush(
	struct xfs_inode	*ip)
{
	int	error;

	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
	if (error)
		return error;
	truncate_pagecache_range(VFS_I(ip), 0, -1);

	/* Verify O_DIRECT for ftmp */
	if (VFS_I(ip)->i_mapping->nrpages)
		return -EINVAL;
	return 0;
}

int
xfs_swap_extents(
	xfs_inode_t	*ip,	/* target inode */
	xfs_inode_t	*tip,	/* tmp inode */
	xfs_swapext_t	*sxp)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_trans_t	*tp;
	xfs_bstat_t	*sbp = &sxp->sx_stat;
	xfs_ifork_t	*tempifp, *ifp, *tifp;
	int		src_log_flags, target_log_flags;
	int		error = 0;
	int		aforkblks = 0;
	int		taforkblks = 0;
	__uint64_t	tmp;
	int		lock_flags;

	/* XXX: we can't do this with rmap, will fix later */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		return -EOPNOTSUPP;

	tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
	if (!tempifp) {
		error = -ENOMEM;
		goto out;
	}

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with.  Then we can ensure the inodes are flushed and have no
	 * page cache safely. Once we have done this we can take the ilocks and
	 * do the rest of the checks.
	 */
	lock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, tip, XFS_IOLOCK_EXCL);
	xfs_lock_two_inodes(ip, tip, XFS_MMAPLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction
	 * commit or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, lock_flags);
	xfs_trans_ijoin(tp, tip, lock_flags);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in.  If they differ, we abort this swap.
	 * This is the mechanism used to ensure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Count the number of extended attribute blocks
	 */
	if ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0) &&
	    (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &aforkblks);
		if (error)
			goto out_trans_cancel;
	}
	if ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0) &&
	    (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
						&taforkblks);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Before we've swapped the forks, let's set the owners of the forks
	 * appropriately. We have to do this as we are demand paging the btree
	 * buffers, and so the validation done on read will expect the owner
	 * field to be correctly set. Once we change the owners, we can swap the
	 * inode forks.
	 *
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		target_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
					      tip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
		src_log_flags |= XFS_ILOG_DOWNER;
		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
					      ip->i_ino, NULL);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	*tempifp = *ifp;	/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = *tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (__uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (__uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (__uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that when the fork is truncated away when the
	 * temporary inode is unlinked we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;

	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			ifp->if_u1.if_extents =
				ifp->if_u2.if_inline_ext;
		}
		src_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (src_log_flags & XFS_ILOG_DOWNER));
		src_log_flags |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		/* If the extents fit in the inode, fix the
		 * pointer.  Otherwise it's already NULL or
		 * pointing to the extent.
		 */
		if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
			tifp->if_u1.if_extents =
				tifp->if_u2.if_inline_ext;
		}
		target_log_flags |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		target_log_flags |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	xfs_trans_log_inode(tp, ip,  src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);
out:
	kmem_free(tempifp);
	return error;

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	goto out;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out;
}