xfs_bmap_util.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  4. * Copyright (c) 2012 Red Hat, Inc.
  5. * All Rights Reserved.
  6. */
  7. #include "xfs.h"
  8. #include "xfs_fs.h"
  9. #include "xfs_shared.h"
  10. #include "xfs_format.h"
  11. #include "xfs_log_format.h"
  12. #include "xfs_trans_resv.h"
  13. #include "xfs_bit.h"
  14. #include "xfs_mount.h"
  15. #include "xfs_da_format.h"
  16. #include "xfs_defer.h"
  17. #include "xfs_inode.h"
  18. #include "xfs_btree.h"
  19. #include "xfs_trans.h"
  20. #include "xfs_extfree_item.h"
  21. #include "xfs_alloc.h"
  22. #include "xfs_bmap.h"
  23. #include "xfs_bmap_util.h"
  24. #include "xfs_bmap_btree.h"
  25. #include "xfs_rtalloc.h"
  26. #include "xfs_error.h"
  27. #include "xfs_quota.h"
  28. #include "xfs_trans_space.h"
  29. #include "xfs_trace.h"
  30. #include "xfs_icache.h"
  31. #include "xfs_log.h"
  32. #include "xfs_rmap_btree.h"
  33. #include "xfs_iomap.h"
  34. #include "xfs_reflink.h"
  35. #include "xfs_refcount.h"
  36. /* Kernel only BMAP related definitions and functions */
  37. /*
  38. * Convert the given file system block to a disk block. We have to treat it
  39. * differently based on whether the file is a real time file or not, because the
  40. * bmap code does.
  41. */
  42. xfs_daddr_t
  43. xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
  44. {
  45. return (XFS_IS_REALTIME_INODE(ip) ? \
  46. (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) : \
  47. XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
  48. }
  49. /*
  50. * Routine to zero an extent on disk allocated to the specific inode.
  51. *
  52. * The VFS functions take a linearised filesystem block offset, so we have to
  53. * convert the sparse xfs fsb to the right format first.
  54. * VFS types are real funky, too.
  55. */
  56. int
  57. xfs_zero_extent(
  58. struct xfs_inode *ip,
  59. xfs_fsblock_t start_fsb,
  60. xfs_off_t count_fsb)
  61. {
  62. struct xfs_mount *mp = ip->i_mount;
  63. xfs_daddr_t sector = xfs_fsb_to_db(ip, start_fsb);
  64. sector_t block = XFS_BB_TO_FSBT(mp, sector);
  65. return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
  66. block << (mp->m_super->s_blocksize_bits - 9),
  67. count_fsb << (mp->m_super->s_blocksize_bits - 9),
  68. GFP_NOFS, 0);
  69. }
  70. #ifdef CONFIG_XFS_RT
  71. int
  72. xfs_bmap_rtalloc(
  73. struct xfs_bmalloca *ap) /* bmap alloc argument struct */
  74. {
  75. int error; /* error return value */
  76. xfs_mount_t *mp; /* mount point structure */
  77. xfs_extlen_t prod = 0; /* product factor for allocators */
  78. xfs_extlen_t mod = 0; /* alignment remainder */
  79. xfs_extlen_t ralen = 0; /* realtime allocation length */
  80. xfs_extlen_t align; /* minimum allocation alignment */
  81. xfs_rtblock_t rtb;
  82. mp = ap->ip->i_mount;
  83. align = xfs_get_extsz_hint(ap->ip);
  84. prod = align / mp->m_sb.sb_rextsize;
  85. error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
  86. align, 1, ap->eof, 0,
  87. ap->conv, &ap->offset, &ap->length);
  88. if (error)
  89. return error;
  90. ASSERT(ap->length);
  91. ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
  92. /*
  93. * If the offset & length are not perfectly aligned
  94. * then kill prod, it will just get us in trouble.
  95. */
  96. div_u64_rem(ap->offset, align, &mod);
  97. if (mod || ap->length % align)
  98. prod = 1;
  99. /*
  100. * Set ralen to be the actual requested length in rtextents.
  101. */
  102. ralen = ap->length / mp->m_sb.sb_rextsize;
  103. /*
  104. * If the old value was close enough to MAXEXTLEN that
  105. * we rounded up to it, cut it back so it's valid again.
  106. * Note that if it's a really large request (bigger than
  107. * MAXEXTLEN), we don't hear about that number, and can't
  108. * adjust the starting point to match it.
  109. */
  110. if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
  111. ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
  112. /*
  113. * Lock out modifications to both the RT bitmap and summary inodes
  114. */
  115. xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
  116. xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
  117. xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
  118. xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
  119. /*
  120. * If it's an allocation to an empty file at offset 0,
  121. * pick an extent that will space things out in the rt area.
  122. */
  123. if (ap->eof && ap->offset == 0) {
  124. xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
  125. error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
  126. if (error)
  127. return error;
  128. ap->blkno = rtx * mp->m_sb.sb_rextsize;
  129. } else {
  130. ap->blkno = 0;
  131. }
  132. xfs_bmap_adjacent(ap);
  133. /*
  134. * Realtime allocation, done through xfs_rtallocate_extent.
  135. */
  136. do_div(ap->blkno, mp->m_sb.sb_rextsize);
  137. rtb = ap->blkno;
  138. ap->length = ralen;
  139. error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
  140. &ralen, ap->wasdel, prod, &rtb);
  141. if (error)
  142. return error;
  143. ap->blkno = rtb;
  144. if (ap->blkno != NULLFSBLOCK) {
  145. ap->blkno *= mp->m_sb.sb_rextsize;
  146. ralen *= mp->m_sb.sb_rextsize;
  147. ap->length = ralen;
  148. ap->ip->i_d.di_nblocks += ralen;
  149. xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
  150. if (ap->wasdel)
  151. ap->ip->i_delayed_blks -= ralen;
  152. /*
  153. * Adjust the disk quota also. This was reserved
  154. * earlier.
  155. */
  156. xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
  157. ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
  158. XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
  159. /* Zero the extent if we were asked to do so */
  160. if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
  161. error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
  162. if (error)
  163. return error;
  164. }
  165. } else {
  166. ap->length = 0;
  167. }
  168. return 0;
  169. }
  170. #endif /* CONFIG_XFS_RT */
  171. /*
  172. * Check if the endoff is outside the last extent. If so the caller will grow
  173. * the allocation to a stripe unit boundary. All offsets are considered outside
  174. * the end of file for an empty fork, so 1 is returned in *eof in that case.
  175. */
  176. int
  177. xfs_bmap_eof(
  178. struct xfs_inode *ip,
  179. xfs_fileoff_t endoff,
  180. int whichfork,
  181. int *eof)
  182. {
  183. struct xfs_bmbt_irec rec;
  184. int error;
  185. error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
  186. if (error || *eof)
  187. return error;
  188. *eof = endoff >= rec.br_startoff + rec.br_blockcount;
  189. return 0;
  190. }
  191. /*
  192. * Extent tree block counting routines.
  193. */
  194. /*
  195. * Count leaf blocks given a range of extent records. Delayed allocation
  196. * extents are not counted towards the totals.
  197. */
  198. xfs_extnum_t
  199. xfs_bmap_count_leaves(
  200. struct xfs_ifork *ifp,
  201. xfs_filblks_t *count)
  202. {
  203. struct xfs_iext_cursor icur;
  204. struct xfs_bmbt_irec got;
  205. xfs_extnum_t numrecs = 0;
  206. for_each_xfs_iext(ifp, &icur, &got) {
  207. if (!isnullstartblock(got.br_startblock)) {
  208. *count += got.br_blockcount;
  209. numrecs++;
  210. }
  211. }
  212. return numrecs;
  213. }
  214. /*
  215. * Count leaf blocks given a range of extent records originally
  216. * in btree format.
  217. */
  218. STATIC void
  219. xfs_bmap_disk_count_leaves(
  220. struct xfs_mount *mp,
  221. struct xfs_btree_block *block,
  222. int numrecs,
  223. xfs_filblks_t *count)
  224. {
  225. int b;
  226. xfs_bmbt_rec_t *frp;
  227. for (b = 1; b <= numrecs; b++) {
  228. frp = XFS_BMBT_REC_ADDR(mp, block, b);
  229. *count += xfs_bmbt_disk_get_blockcount(frp);
  230. }
  231. }
  232. /*
  233. * Recursively walks each level of a btree
  234. * to count total fsblocks in use.
  235. */
  236. STATIC int
  237. xfs_bmap_count_tree(
  238. struct xfs_mount *mp,
  239. struct xfs_trans *tp,
  240. struct xfs_ifork *ifp,
  241. xfs_fsblock_t blockno,
  242. int levelin,
  243. xfs_extnum_t *nextents,
  244. xfs_filblks_t *count)
  245. {
  246. int error;
  247. struct xfs_buf *bp, *nbp;
  248. int level = levelin;
  249. __be64 *pp;
  250. xfs_fsblock_t bno = blockno;
  251. xfs_fsblock_t nextbno;
  252. struct xfs_btree_block *block, *nextblock;
  253. int numrecs;
  254. error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
  255. &xfs_bmbt_buf_ops);
  256. if (error)
  257. return error;
  258. *count += 1;
  259. block = XFS_BUF_TO_BLOCK(bp);
  260. if (--level) {
  261. /* Not at node above leaves, count this level of nodes */
  262. nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
  263. while (nextbno != NULLFSBLOCK) {
  264. error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
  265. XFS_BMAP_BTREE_REF,
  266. &xfs_bmbt_buf_ops);
  267. if (error)
  268. return error;
  269. *count += 1;
  270. nextblock = XFS_BUF_TO_BLOCK(nbp);
  271. nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
  272. xfs_trans_brelse(tp, nbp);
  273. }
  274. /* Dive to the next level */
  275. pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
  276. bno = be64_to_cpu(*pp);
  277. error = xfs_bmap_count_tree(mp, tp, ifp, bno, level, nextents,
  278. count);
  279. if (error) {
  280. xfs_trans_brelse(tp, bp);
  281. XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
  282. XFS_ERRLEVEL_LOW, mp);
  283. return -EFSCORRUPTED;
  284. }
  285. xfs_trans_brelse(tp, bp);
  286. } else {
  287. /* count all level 1 nodes and their leaves */
  288. for (;;) {
  289. nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
  290. numrecs = be16_to_cpu(block->bb_numrecs);
  291. (*nextents) += numrecs;
  292. xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
  293. xfs_trans_brelse(tp, bp);
  294. if (nextbno == NULLFSBLOCK)
  295. break;
  296. bno = nextbno;
  297. error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
  298. XFS_BMAP_BTREE_REF,
  299. &xfs_bmbt_buf_ops);
  300. if (error)
  301. return error;
  302. *count += 1;
  303. block = XFS_BUF_TO_BLOCK(bp);
  304. }
  305. }
  306. return 0;
  307. }
  308. /*
  309. * Count fsblocks of the given fork. Delayed allocation extents are
  310. * not counted towards the totals.
  311. */
  312. int
  313. xfs_bmap_count_blocks(
  314. struct xfs_trans *tp,
  315. struct xfs_inode *ip,
  316. int whichfork,
  317. xfs_extnum_t *nextents,
  318. xfs_filblks_t *count)
  319. {
  320. struct xfs_mount *mp; /* file system mount structure */
  321. __be64 *pp; /* pointer to block address */
  322. struct xfs_btree_block *block; /* current btree block */
  323. struct xfs_ifork *ifp; /* fork structure */
  324. xfs_fsblock_t bno; /* block # of "block" */
  325. int level; /* btree level, for checking */
  326. int error;
  327. bno = NULLFSBLOCK;
  328. mp = ip->i_mount;
  329. *nextents = 0;
  330. *count = 0;
  331. ifp = XFS_IFORK_PTR(ip, whichfork);
  332. if (!ifp)
  333. return 0;
  334. switch (XFS_IFORK_FORMAT(ip, whichfork)) {
  335. case XFS_DINODE_FMT_EXTENTS:
  336. *nextents = xfs_bmap_count_leaves(ifp, count);
  337. return 0;
  338. case XFS_DINODE_FMT_BTREE:
  339. if (!(ifp->if_flags & XFS_IFEXTENTS)) {
  340. error = xfs_iread_extents(tp, ip, whichfork);
  341. if (error)
  342. return error;
  343. }
  344. /*
  345. * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
  346. */
  347. block = ifp->if_broot;
  348. level = be16_to_cpu(block->bb_level);
  349. ASSERT(level > 0);
  350. pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
  351. bno = be64_to_cpu(*pp);
  352. ASSERT(bno != NULLFSBLOCK);
  353. ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
  354. ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
  355. error = xfs_bmap_count_tree(mp, tp, ifp, bno, level,
  356. nextents, count);
  357. if (error) {
  358. XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)",
  359. XFS_ERRLEVEL_LOW, mp);
  360. return -EFSCORRUPTED;
  361. }
  362. return 0;
  363. }
  364. return 0;
  365. }
  366. static int
  367. xfs_getbmap_report_one(
  368. struct xfs_inode *ip,
  369. struct getbmapx *bmv,
  370. struct kgetbmap *out,
  371. int64_t bmv_end,
  372. struct xfs_bmbt_irec *got)
  373. {
  374. struct kgetbmap *p = out + bmv->bmv_entries;
  375. bool shared = false, trimmed = false;
  376. int error;
  377. error = xfs_reflink_trim_around_shared(ip, got, &shared, &trimmed);
  378. if (error)
  379. return error;
  380. if (isnullstartblock(got->br_startblock) ||
  381. got->br_startblock == DELAYSTARTBLOCK) {
  382. /*
  383. * Delalloc extents that start beyond EOF can occur due to
  384. * speculative EOF allocation when the delalloc extent is larger
  385. * than the largest freespace extent at conversion time. These
  386. * extents cannot be converted by data writeback, so can exist
  387. * here even if we are not supposed to be finding delalloc
  388. * extents.
  389. */
  390. if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
  391. ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);
  392. p->bmv_oflags |= BMV_OF_DELALLOC;
  393. p->bmv_block = -2;
  394. } else {
  395. p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
  396. }
  397. if (got->br_state == XFS_EXT_UNWRITTEN &&
  398. (bmv->bmv_iflags & BMV_IF_PREALLOC))
  399. p->bmv_oflags |= BMV_OF_PREALLOC;
  400. if (shared)
  401. p->bmv_oflags |= BMV_OF_SHARED;
  402. p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
  403. p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
  404. bmv->bmv_offset = p->bmv_offset + p->bmv_length;
  405. bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
  406. bmv->bmv_entries++;
  407. return 0;
  408. }
  409. static void
  410. xfs_getbmap_report_hole(
  411. struct xfs_inode *ip,
  412. struct getbmapx *bmv,
  413. struct kgetbmap *out,
  414. int64_t bmv_end,
  415. xfs_fileoff_t bno,
  416. xfs_fileoff_t end)
  417. {
  418. struct kgetbmap *p = out + bmv->bmv_entries;
  419. if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
  420. return;
  421. p->bmv_block = -1;
  422. p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
  423. p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
  424. bmv->bmv_offset = p->bmv_offset + p->bmv_length;
  425. bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
  426. bmv->bmv_entries++;
  427. }
  428. static inline bool
  429. xfs_getbmap_full(
  430. struct getbmapx *bmv)
  431. {
  432. return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
  433. }
  434. static bool
  435. xfs_getbmap_next_rec(
  436. struct xfs_bmbt_irec *rec,
  437. xfs_fileoff_t total_end)
  438. {
  439. xfs_fileoff_t end = rec->br_startoff + rec->br_blockcount;
  440. if (end == total_end)
  441. return false;
  442. rec->br_startoff += rec->br_blockcount;
  443. if (!isnullstartblock(rec->br_startblock) &&
  444. rec->br_startblock != DELAYSTARTBLOCK)
  445. rec->br_startblock += rec->br_blockcount;
  446. rec->br_blockcount = total_end - end;
  447. return true;
  448. }
  449. /*
  450. * Get inode's extents as described in bmv, and format for output.
  451. * Calls formatter to fill the user's buffer until all extents
  452. * are mapped, until the passed-in bmv->bmv_count slots have
  453. * been filled, or until the formatter short-circuits the loop,
  454. * if it is tracking filled-in extents on its own.
  455. */
  456. int /* error code */
  457. xfs_getbmap(
  458. struct xfs_inode *ip,
  459. struct getbmapx *bmv, /* user bmap structure */
  460. struct kgetbmap *out)
  461. {
  462. struct xfs_mount *mp = ip->i_mount;
  463. int iflags = bmv->bmv_iflags;
  464. int whichfork, lock, error = 0;
  465. int64_t bmv_end, max_len;
  466. xfs_fileoff_t bno, first_bno;
  467. struct xfs_ifork *ifp;
  468. struct xfs_bmbt_irec got, rec;
  469. xfs_filblks_t len;
  470. struct xfs_iext_cursor icur;
  471. if (bmv->bmv_iflags & ~BMV_IF_VALID)
  472. return -EINVAL;
  473. #ifndef DEBUG
  474. /* Only allow CoW fork queries if we're debugging. */
  475. if (iflags & BMV_IF_COWFORK)
  476. return -EINVAL;
  477. #endif
  478. if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
  479. return -EINVAL;
  480. if (bmv->bmv_length < -1)
  481. return -EINVAL;
  482. bmv->bmv_entries = 0;
  483. if (bmv->bmv_length == 0)
  484. return 0;
  485. if (iflags & BMV_IF_ATTRFORK)
  486. whichfork = XFS_ATTR_FORK;
  487. else if (iflags & BMV_IF_COWFORK)
  488. whichfork = XFS_COW_FORK;
  489. else
  490. whichfork = XFS_DATA_FORK;
  491. ifp = XFS_IFORK_PTR(ip, whichfork);
  492. xfs_ilock(ip, XFS_IOLOCK_SHARED);
  493. switch (whichfork) {
  494. case XFS_ATTR_FORK:
  495. if (!XFS_IFORK_Q(ip))
  496. goto out_unlock_iolock;
  497. max_len = 1LL << 32;
  498. lock = xfs_ilock_attr_map_shared(ip);
  499. break;
  500. case XFS_COW_FORK:
  501. /* No CoW fork? Just return */
  502. if (!ifp)
  503. goto out_unlock_iolock;
  504. if (xfs_get_cowextsz_hint(ip))
  505. max_len = mp->m_super->s_maxbytes;
  506. else
  507. max_len = XFS_ISIZE(ip);
  508. lock = XFS_ILOCK_SHARED;
  509. xfs_ilock(ip, lock);
  510. break;
  511. case XFS_DATA_FORK:
  512. if (!(iflags & BMV_IF_DELALLOC) &&
  513. (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
  514. error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
  515. if (error)
  516. goto out_unlock_iolock;
  517. /*
  518. * Even after flushing the inode, there can still be
  519. * delalloc blocks on the inode beyond EOF due to
  520. * speculative preallocation. These are not removed
  521. * until the release function is called or the inode
  522. * is inactivated. Hence we cannot assert here that
  523. * ip->i_delayed_blks == 0.
  524. */
  525. }
  526. if (xfs_get_extsz_hint(ip) ||
  527. (ip->i_d.di_flags &
  528. (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
  529. max_len = mp->m_super->s_maxbytes;
  530. else
  531. max_len = XFS_ISIZE(ip);
  532. lock = xfs_ilock_data_map_shared(ip);
  533. break;
  534. }
  535. switch (XFS_IFORK_FORMAT(ip, whichfork)) {
  536. case XFS_DINODE_FMT_EXTENTS:
  537. case XFS_DINODE_FMT_BTREE:
  538. break;
  539. case XFS_DINODE_FMT_LOCAL:
  540. /* Local format inode forks report no extents. */
  541. goto out_unlock_ilock;
  542. default:
  543. error = -EINVAL;
  544. goto out_unlock_ilock;
  545. }
  546. if (bmv->bmv_length == -1) {
  547. max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
  548. bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
  549. }
  550. bmv_end = bmv->bmv_offset + bmv->bmv_length;
  551. first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
  552. len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
  553. if (!(ifp->if_flags & XFS_IFEXTENTS)) {
  554. error = xfs_iread_extents(NULL, ip, whichfork);
  555. if (error)
  556. goto out_unlock_ilock;
  557. }
  558. if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
  559. /*
  560. * Report a whole-file hole if the delalloc flag is set to
  561. * stay compatible with the old implementation.
  562. */
  563. if (iflags & BMV_IF_DELALLOC)
  564. xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
  565. XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
  566. goto out_unlock_ilock;
  567. }
  568. while (!xfs_getbmap_full(bmv)) {
  569. xfs_trim_extent(&got, first_bno, len);
  570. /*
  571. * Report an entry for a hole if this extent doesn't directly
  572. * follow the previous one.
  573. */
  574. if (got.br_startoff > bno) {
  575. xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
  576. got.br_startoff);
  577. if (xfs_getbmap_full(bmv))
  578. break;
  579. }
  580. /*
  581. * In order to report shared extents accurately, we report each
  582. * distinct shared / unshared part of a single bmbt record with
  583. * an individual getbmapx record.
  584. */
  585. bno = got.br_startoff + got.br_blockcount;
  586. rec = got;
  587. do {
  588. error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
  589. &rec);
  590. if (error || xfs_getbmap_full(bmv))
  591. goto out_unlock_ilock;
  592. } while (xfs_getbmap_next_rec(&rec, bno));
  593. if (!xfs_iext_next_extent(ifp, &icur, &got)) {
  594. xfs_fileoff_t end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
  595. out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;
  596. if (whichfork != XFS_ATTR_FORK && bno < end &&
  597. !xfs_getbmap_full(bmv)) {
  598. xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
  599. bno, end);
  600. }
  601. break;
  602. }
  603. if (bno >= first_bno + len)
  604. break;
  605. }
  606. out_unlock_ilock:
  607. xfs_iunlock(ip, lock);
  608. out_unlock_iolock:
  609. xfs_iunlock(ip, XFS_IOLOCK_SHARED);
  610. return error;
  611. }
  612. /*
  613. * Dead simple method of punching delayed allocation blocks from a range in
  614. * the inode. Walks a block at a time so will be slow, but is only executed in
  615. * rare error cases so the overhead is not critical. This will always punch out
  616. * both the start and end blocks, even if the ranges only partially overlap
  617. * them, so it is up to the caller to ensure that partial blocks are not
  618. * passed in.
  619. */
  620. int
  621. xfs_bmap_punch_delalloc_range(
  622. struct xfs_inode *ip,
  623. xfs_fileoff_t start_fsb,
  624. xfs_fileoff_t length)
  625. {
  626. xfs_fileoff_t remaining = length;
  627. int error = 0;
  628. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  629. do {
  630. int done;
  631. xfs_bmbt_irec_t imap;
  632. int nimaps = 1;
  633. xfs_fsblock_t firstblock;
  634. struct xfs_defer_ops dfops;
  635. /*
  636. * Map the range first and check that it is a delalloc extent
  637. * before trying to unmap the range. Otherwise we will be
  638. * trying to remove a real extent (which requires a
  639. * transaction) or a hole, which is probably a bad idea...
  640. */
  641. error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
  642. XFS_BMAPI_ENTIRE);
  643. if (error) {
  644. /* something screwed, just bail */
  645. if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
  646. xfs_alert(ip->i_mount,
  647. "Failed delalloc mapping lookup ino %lld fsb %lld.",
  648. ip->i_ino, start_fsb);
  649. }
  650. break;
  651. }
  652. if (!nimaps) {
  653. /* nothing there */
  654. goto next_block;
  655. }
  656. if (imap.br_startblock != DELAYSTARTBLOCK) {
  657. /* been converted, ignore */
  658. goto next_block;
  659. }
  660. WARN_ON(imap.br_blockcount == 0);
  661. /*
  662. * Note: while we initialise the firstblock/dfops pair, they
  663. * should never be used because blocks should never be
  664. * allocated or freed for a delalloc extent and hence we don't need
  665. * to cancel or finish them after the xfs_bunmapi() call.
  666. */
  667. xfs_defer_init(&dfops, &firstblock);
  668. error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
  669. &dfops, &done);
  670. if (error)
  671. break;
  672. ASSERT(!xfs_defer_has_unfinished_work(&dfops));
  673. next_block:
  674. start_fsb++;
  675. remaining--;
  676. } while(remaining > 0);
  677. return error;
  678. }
  679. /*
  680. * Test whether it is appropriate to check an inode for and free post EOF
  681. * blocks. The 'force' parameter determines whether we should also consider
  682. * regular files that are marked preallocated or append-only.
  683. */
  684. bool
  685. xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
  686. {
  687. /* prealloc/delalloc exists only on regular files */
  688. if (!S_ISREG(VFS_I(ip)->i_mode))
  689. return false;
  690. /*
  691. * Zero sized files with no cached pages and no delalloc blocks will not
  692. * have speculative prealloc/delalloc blocks to remove.
  693. */
  694. if (VFS_I(ip)->i_size == 0 &&
  695. VFS_I(ip)->i_mapping->nrpages == 0 &&
  696. ip->i_delayed_blks == 0)
  697. return false;
  698. /* If we haven't read in the extent list, then don't do it now. */
  699. if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
  700. return false;
  701. /*
  702. * Do not free real preallocated or append-only files unless the file
  703. * has delalloc blocks and we are forced to remove them.
  704. */
  705. if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
  706. if (!force || ip->i_delayed_blks == 0)
  707. return false;
  708. return true;
  709. }
  710. /*
  711. * This is called to free any blocks beyond eof. The caller must hold
  712. * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
  713. * reference to the inode.
  714. */
  715. int
  716. xfs_free_eofblocks(
  717. struct xfs_inode *ip)
  718. {
  719. struct xfs_trans *tp;
  720. int error;
  721. xfs_fileoff_t end_fsb;
  722. xfs_fileoff_t last_fsb;
  723. xfs_filblks_t map_len;
  724. int nimaps;
  725. struct xfs_bmbt_irec imap;
  726. struct xfs_mount *mp = ip->i_mount;
  727. /*
  728. * Figure out if there are any blocks beyond the end
  729. * of the file. If not, then there is nothing to do.
  730. */
  731. end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
  732. last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
  733. if (last_fsb <= end_fsb)
  734. return 0;
  735. map_len = last_fsb - end_fsb;
  736. nimaps = 1;
  737. xfs_ilock(ip, XFS_ILOCK_SHARED);
  738. error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
  739. xfs_iunlock(ip, XFS_ILOCK_SHARED);
  740. /*
  741. * If there are blocks after the end of file, truncate the file to its
  742. * current size to free them up.
  743. */
  744. if (!error && (nimaps != 0) &&
  745. (imap.br_startblock != HOLESTARTBLOCK ||
  746. ip->i_delayed_blks)) {
  747. /*
  748. * Attach the dquots to the inode up front.
  749. */
  750. error = xfs_qm_dqattach(ip);
  751. if (error)
  752. return error;
  753. /* wait on dio to ensure i_size has settled */
  754. inode_dio_wait(VFS_I(ip));
  755. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
  756. &tp);
  757. if (error) {
  758. ASSERT(XFS_FORCED_SHUTDOWN(mp));
  759. return error;
  760. }
  761. xfs_ilock(ip, XFS_ILOCK_EXCL);
  762. xfs_trans_ijoin(tp, ip, 0);
  763. /*
  764. * Do not update the on-disk file size. If we update the
  765. * on-disk file size and then the system crashes before the
  766. * contents of the file are flushed to disk then the files
  767. * may be full of holes (ie NULL files bug).
  768. */
  769. error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
  770. XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
  771. if (error) {
  772. /*
  773. * If we get an error at this point we simply don't
  774. * bother truncating the file.
  775. */
  776. xfs_trans_cancel(tp);
  777. } else {
  778. error = xfs_trans_commit(tp);
  779. if (!error)
  780. xfs_inode_clear_eofblocks_tag(ip);
  781. }
  782. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  783. }
  784. return error;
  785. }
  786. int
  787. xfs_alloc_file_space(
  788. struct xfs_inode *ip,
  789. xfs_off_t offset,
  790. xfs_off_t len,
  791. int alloc_type)
  792. {
  793. xfs_mount_t *mp = ip->i_mount;
  794. xfs_off_t count;
  795. xfs_filblks_t allocated_fsb;
  796. xfs_filblks_t allocatesize_fsb;
  797. xfs_extlen_t extsz, temp;
  798. xfs_fileoff_t startoffset_fsb;
  799. xfs_fsblock_t firstfsb;
  800. int nimaps;
  801. int quota_flag;
  802. int rt;
  803. xfs_trans_t *tp;
  804. xfs_bmbt_irec_t imaps[1], *imapp;
  805. struct xfs_defer_ops dfops;
  806. uint qblocks, resblks, resrtextents;
  807. int error;
  808. trace_xfs_alloc_file_space(ip);
  809. if (XFS_FORCED_SHUTDOWN(mp))
  810. return -EIO;
  811. error = xfs_qm_dqattach(ip);
  812. if (error)
  813. return error;
  814. if (len <= 0)
  815. return -EINVAL;
  816. rt = XFS_IS_REALTIME_INODE(ip);
  817. extsz = xfs_get_extsz_hint(ip);
  818. count = len;
  819. imapp = &imaps[0];
  820. nimaps = 1;
  821. startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
  822. allocatesize_fsb = XFS_B_TO_FSB(mp, count);
  823. /*
  824. * Allocate file space until done or until there is an error
  825. */
  826. while (allocatesize_fsb && !error) {
  827. xfs_fileoff_t s, e;
  828. /*
  829. * Determine space reservations for data/realtime.
  830. */
  831. if (unlikely(extsz)) {
  832. s = startoffset_fsb;
  833. do_div(s, extsz);
  834. s *= extsz;
  835. e = startoffset_fsb + allocatesize_fsb;
  836. div_u64_rem(startoffset_fsb, extsz, &temp);
  837. if (temp)
  838. e += temp;
  839. div_u64_rem(e, extsz, &temp);
  840. if (temp)
  841. e += extsz - temp;
  842. } else {
  843. s = 0;
  844. e = allocatesize_fsb;
  845. }
  846. /*
  847. * The transaction reservation is limited to a 32-bit block
  848. * count, hence we need to limit the number of blocks we are
  849. * trying to reserve to avoid an overflow. We can't allocate
  850. * more than @nimaps extents, and an extent is limited on disk
  851. * to MAXEXTLEN (21 bits), so use that to enforce the limit.
  852. */
  853. resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
  854. if (unlikely(rt)) {
  855. resrtextents = qblocks = resblks;
  856. resrtextents /= mp->m_sb.sb_rextsize;
  857. resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
  858. quota_flag = XFS_QMOPT_RES_RTBLKS;
  859. } else {
  860. resrtextents = 0;
  861. resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
  862. quota_flag = XFS_QMOPT_RES_REGBLKS;
  863. }
  864. /*
  865. * Allocate and setup the transaction.
  866. */
  867. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
  868. resrtextents, 0, &tp);
  869. /*
  870. * Check for running out of space
  871. */
  872. if (error) {
  873. /*
  874. * Free the transaction structure.
  875. */
  876. ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
  877. break;
  878. }
  879. xfs_ilock(ip, XFS_ILOCK_EXCL);
  880. error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
  881. 0, quota_flag);
  882. if (error)
  883. goto error1;
  884. xfs_trans_ijoin(tp, ip, 0);
  885. xfs_defer_init(&dfops, &firstfsb);
  886. error = xfs_bmapi_write(tp, ip, startoffset_fsb,
  887. allocatesize_fsb, alloc_type, &firstfsb,
  888. resblks, imapp, &nimaps, &dfops);
  889. if (error)
  890. goto error0;
  891. /*
  892. * Complete the transaction
  893. */
  894. error = xfs_defer_finish(&tp, &dfops);
  895. if (error)
  896. goto error0;
  897. error = xfs_trans_commit(tp);
  898. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  899. if (error)
  900. break;
  901. allocated_fsb = imapp->br_blockcount;
  902. if (nimaps == 0) {
  903. error = -ENOSPC;
  904. break;
  905. }
  906. startoffset_fsb += allocated_fsb;
  907. allocatesize_fsb -= allocated_fsb;
  908. }
  909. return error;
  910. error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
  911. xfs_defer_cancel(&dfops);
  912. xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
  913. error1: /* Just cancel transaction */
  914. xfs_trans_cancel(tp);
  915. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  916. return error;
  917. }
  918. static int
  919. xfs_unmap_extent(
  920. struct xfs_inode *ip,
  921. xfs_fileoff_t startoffset_fsb,
  922. xfs_filblks_t len_fsb,
  923. int *done)
  924. {
  925. struct xfs_mount *mp = ip->i_mount;
  926. struct xfs_trans *tp;
  927. struct xfs_defer_ops dfops;
  928. xfs_fsblock_t firstfsb;
  929. uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
  930. int error;
  931. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
  932. if (error) {
  933. ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
  934. return error;
  935. }
  936. xfs_ilock(ip, XFS_ILOCK_EXCL);
  937. error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
  938. ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
  939. if (error)
  940. goto out_trans_cancel;
  941. xfs_trans_ijoin(tp, ip, 0);
  942. xfs_defer_init(&dfops, &firstfsb);
  943. error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
  944. &dfops, done);
  945. if (error)
  946. goto out_bmap_cancel;
  947. xfs_defer_ijoin(&dfops, ip);
  948. error = xfs_defer_finish(&tp, &dfops);
  949. if (error)
  950. goto out_bmap_cancel;
  951. error = xfs_trans_commit(tp);
  952. out_unlock:
  953. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  954. return error;
  955. out_bmap_cancel:
  956. xfs_defer_cancel(&dfops);
  957. out_trans_cancel:
  958. xfs_trans_cancel(tp);
  959. goto out_unlock;
  960. }
  961. static int
  962. xfs_adjust_extent_unmap_boundaries(
  963. struct xfs_inode *ip,
  964. xfs_fileoff_t *startoffset_fsb,
  965. xfs_fileoff_t *endoffset_fsb)
  966. {
  967. struct xfs_mount *mp = ip->i_mount;
  968. struct xfs_bmbt_irec imap;
  969. int nimap, error;
  970. xfs_extlen_t mod = 0;
  971. nimap = 1;
  972. error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
  973. if (error)
  974. return error;
  975. if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
  976. ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
  977. div_u64_rem(imap.br_startblock, mp->m_sb.sb_rextsize, &mod);
  978. if (mod)
  979. *startoffset_fsb += mp->m_sb.sb_rextsize - mod;
  980. }
  981. nimap = 1;
  982. error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
  983. if (error)
  984. return error;
  985. if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
  986. ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
  987. mod++;
  988. if (mod && mod != mp->m_sb.sb_rextsize)
  989. *endoffset_fsb -= mod;
  990. }
  991. return 0;
  992. }
  993. static int
  994. xfs_flush_unmap_range(
  995. struct xfs_inode *ip,
  996. xfs_off_t offset,
  997. xfs_off_t len)
  998. {
  999. struct xfs_mount *mp = ip->i_mount;
  1000. struct inode *inode = VFS_I(ip);
  1001. xfs_off_t rounding, start, end;
  1002. int error;
  1003. /* wait for the completion of any pending DIOs */
  1004. inode_dio_wait(inode);
  1005. rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
  1006. start = round_down(offset, rounding);
  1007. end = round_up(offset + len, rounding) - 1;
  1008. error = filemap_write_and_wait_range(inode->i_mapping, start, end);
  1009. if (error)
  1010. return error;
  1011. truncate_pagecache_range(inode, start, end);
  1012. return 0;
  1013. }
  1014. int
  1015. xfs_free_file_space(
  1016. struct xfs_inode *ip,
  1017. xfs_off_t offset,
  1018. xfs_off_t len)
  1019. {
  1020. struct xfs_mount *mp = ip->i_mount;
  1021. xfs_fileoff_t startoffset_fsb;
  1022. xfs_fileoff_t endoffset_fsb;
  1023. int done = 0, error;
  1024. trace_xfs_free_file_space(ip);
  1025. error = xfs_qm_dqattach(ip);
  1026. if (error)
  1027. return error;
  1028. if (len <= 0) /* if nothing being freed */
  1029. return 0;
  1030. error = xfs_flush_unmap_range(ip, offset, len);
  1031. if (error)
  1032. return error;
  1033. startoffset_fsb = XFS_B_TO_FSB(mp, offset);
  1034. endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
  1035. /*
  1036. * Need to zero the stuff we're not freeing, on disk. If it's a RT file
  1037. * and we can't use unwritten extents then we actually need to zero
  1038. * the whole extent, otherwise we just need to take care of block
  1039. * boundaries, and xfs_bunmapi will handle the rest.
  1040. */
  1041. if (XFS_IS_REALTIME_INODE(ip) &&
  1042. !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
  1043. error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
  1044. &endoffset_fsb);
  1045. if (error)
  1046. return error;
  1047. }
  1048. if (endoffset_fsb > startoffset_fsb) {
  1049. while (!done) {
  1050. error = xfs_unmap_extent(ip, startoffset_fsb,
  1051. endoffset_fsb - startoffset_fsb, &done);
  1052. if (error)
  1053. return error;
  1054. }
  1055. }
  1056. /*
  1057. * Now that we've unmapped all full blocks we'll have to zero out any
  1058. * partial block at the beginning and/or end. iomap_zero_range is smart
  1059. * enough to skip any holes, including those we just created, but we
  1060. * must take care not to zero beyond EOF and enlarge i_size.
  1061. */
  1062. if (offset >= XFS_ISIZE(ip))
  1063. return 0;
  1064. if (offset + len > XFS_ISIZE(ip))
  1065. len = XFS_ISIZE(ip) - offset;
  1066. return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
  1067. }
  1068. /*
  1069. * Preallocate and zero a range of a file. This mechanism has the allocation
  1070. * semantics of fallocate and in addition converts data in the range to zeroes.
  1071. */
  1072. int
  1073. xfs_zero_file_space(
  1074. struct xfs_inode *ip,
  1075. xfs_off_t offset,
  1076. xfs_off_t len)
  1077. {
  1078. struct xfs_mount *mp = ip->i_mount;
  1079. uint blksize;
  1080. int error;
  1081. trace_xfs_zero_file_space(ip);
  1082. blksize = 1 << mp->m_sb.sb_blocklog;
  1083. /*
  1084. * Punch a hole and prealloc the range. We use hole punch rather than
  1085. * unwritten extent conversion for two reasons:
  1086. *
  1087. * 1.) Hole punch handles partial block zeroing for us.
  1088. *
  1089. * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
  1090. * by virtue of the hole punch.
  1091. */
  1092. error = xfs_free_file_space(ip, offset, len);
  1093. if (error)
  1094. goto out;
  1095. error = xfs_alloc_file_space(ip, round_down(offset, blksize),
  1096. round_up(offset + len, blksize) -
  1097. round_down(offset, blksize),
  1098. XFS_BMAPI_PREALLOC);
  1099. out:
  1100. return error;
  1101. }
  1102. static int
  1103. xfs_prepare_shift(
  1104. struct xfs_inode *ip,
  1105. loff_t offset)
  1106. {
  1107. int error;
  1108. /*
  1109. * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
  1110. * into the accessible region of the file.
  1111. */
  1112. if (xfs_can_free_eofblocks(ip, true)) {
  1113. error = xfs_free_eofblocks(ip);
  1114. if (error)
  1115. return error;
  1116. }
  1117. /*
  1118. * Writeback and invalidate cache for the remainder of the file as we're
  1119. * about to shift down every extent from offset to EOF.
  1120. */
  1121. error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1);
  1122. if (error)
  1123. return error;
  1124. error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
  1125. offset >> PAGE_SHIFT, -1);
  1126. if (error)
  1127. return error;
  1128. /*
  1129. * Clean out anything hanging around in the cow fork now that
  1130. * we've flushed all the dirty data out to disk to avoid having
  1131. * CoW extents at the wrong offsets.
  1132. */
  1133. if (xfs_is_reflink_inode(ip)) {
  1134. error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
  1135. true);
  1136. if (error)
  1137. return error;
  1138. }
  1139. return 0;
  1140. }
  1141. /*
  1142. * xfs_collapse_file_space()
  1143. * This routine frees disk space and shifts extents for the given file.
  1144. * The first thing we do is free the data blocks in the specified range
  1145. * by calling xfs_free_file_space(), which also syncs dirty data
  1146. * and invalidates the page cache over the region on which collapse range
  1147. * is working. Extent records are then shifted to the left to cover the hole.
  1148. * RETURNS:
  1149. * 0 on success
  1150. * errno on error
  1151. *
  1152. */
  1153. int
  1154. xfs_collapse_file_space(
  1155. struct xfs_inode *ip,
  1156. xfs_off_t offset,
  1157. xfs_off_t len)
  1158. {
  1159. struct xfs_mount *mp = ip->i_mount;
  1160. struct xfs_trans *tp;
  1161. int error;
  1162. struct xfs_defer_ops dfops;
  1163. xfs_fsblock_t first_block;
  1164. xfs_fileoff_t next_fsb = XFS_B_TO_FSB(mp, offset + len);
  1165. xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
  1166. uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
  1167. bool done = false;
  1168. ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
  1169. ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
  1170. trace_xfs_collapse_file_space(ip);
  1171. error = xfs_free_file_space(ip, offset, len);
  1172. if (error)
  1173. return error;
  1174. error = xfs_prepare_shift(ip, offset);
  1175. if (error)
  1176. return error;
  1177. while (!error && !done) {
  1178. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
  1179. &tp);
  1180. if (error)
  1181. break;
  1182. xfs_ilock(ip, XFS_ILOCK_EXCL);
  1183. error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
  1184. ip->i_gdquot, ip->i_pdquot, resblks, 0,
  1185. XFS_QMOPT_RES_REGBLKS);
  1186. if (error)
  1187. goto out_trans_cancel;
  1188. xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
  1189. xfs_defer_init(&dfops, &first_block);
  1190. error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
  1191. &done, &first_block, &dfops);
  1192. if (error)
  1193. goto out_bmap_cancel;
  1194. error = xfs_defer_finish(&tp, &dfops);
  1195. if (error)
  1196. goto out_bmap_cancel;
  1197. error = xfs_trans_commit(tp);
  1198. }
  1199. return error;
  1200. out_bmap_cancel:
  1201. xfs_defer_cancel(&dfops);
  1202. out_trans_cancel:
  1203. xfs_trans_cancel(tp);
  1204. return error;
  1205. }
  1206. /*
  1207. * xfs_insert_file_space()
  1208. * This routine creates hole space by shifting extents for the given file.
  1209. * The first thing we do is sync dirty data and invalidate the page cache
  1210. * over the region on which insert range is working. Then an extent is split
  1211. * in two at the given offset by calling xfs_bmap_split_extent, and all
  1212. * extent records lying between [offset, last allocated extent] are shifted
  1213. * to the right to make room for the hole.
  1214. * RETURNS:
  1215. * 0 on success
  1216. * errno on error
  1217. */
  1218. int
  1219. xfs_insert_file_space(
  1220. struct xfs_inode *ip,
  1221. loff_t offset,
  1222. loff_t len)
  1223. {
  1224. struct xfs_mount *mp = ip->i_mount;
  1225. struct xfs_trans *tp;
  1226. int error;
  1227. struct xfs_defer_ops dfops;
  1228. xfs_fsblock_t first_block;
  1229. xfs_fileoff_t stop_fsb = XFS_B_TO_FSB(mp, offset);
  1230. xfs_fileoff_t next_fsb = NULLFSBLOCK;
  1231. xfs_fileoff_t shift_fsb = XFS_B_TO_FSB(mp, len);
  1232. bool done = false;
  1233. ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
  1234. ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));
  1235. trace_xfs_insert_file_space(ip);
  1236. error = xfs_prepare_shift(ip, offset);
  1237. if (error)
  1238. return error;
  1239. /*
  1240. * The extent shifting code works on extent granularity. So, if stop_fsb
  1241. * is not the starting block of an extent, we need to split the extent at
  1242. * stop_fsb.
  1243. */
  1244. error = xfs_bmap_split_extent(ip, stop_fsb);
  1245. if (error)
  1246. return error;
  1247. while (!error && !done) {
  1248. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0,
  1249. &tp);
  1250. if (error)
  1251. break;
  1252. xfs_ilock(ip, XFS_ILOCK_EXCL);
  1253. xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
  1254. xfs_defer_init(&dfops, &first_block);
  1255. error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
  1256. &done, stop_fsb, &first_block, &dfops);
  1257. if (error)
  1258. goto out_bmap_cancel;
  1259. error = xfs_defer_finish(&tp, &dfops);
  1260. if (error)
  1261. goto out_bmap_cancel;
  1262. error = xfs_trans_commit(tp);
  1263. }
  1264. return error;
  1265. out_bmap_cancel:
  1266. xfs_defer_cancel(&dfops);
  1267. xfs_trans_cancel(tp);
  1268. return error;
  1269. }
  1270. /*
  1271. * We need to check that the format of the data fork in the temporary inode is
  1272. * valid for the target inode before doing the swap. This is not a problem with
  1273. * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
  1274. * data fork depending on the space the attribute fork is taking so we can get
  1275. * invalid formats on the target inode.
  1276. *
  1277. * E.g. target has space for 7 extents in extent format, temp inode only has
  1278. * space for 6. If we defragment down to 7 extents, then the tmp format is a
  1279. * btree, but when swapped it needs to be in extent format. Hence we can't just
  1280. * blindly swap data forks on attr2 filesystems.
  1281. *
  1282. * Note that we check the swap in both directions so that we don't end up with
  1283. * a corrupt temporary inode, either.
  1284. *
  1285. * Note that fixing the way xfs_fsr sets up the attribute fork in the source
  1286. * inode will prevent this situation from occurring, so all we do here is
  1287. * reject and log the attempt. Basically we are putting the responsibility on
  1288. * userspace to get this right.
  1289. */
  1290. static int
  1291. xfs_swap_extents_check_format(
  1292. struct xfs_inode *ip, /* target inode */
  1293. struct xfs_inode *tip) /* tmp inode */
  1294. {
  1295. /* Should never get a local format */
  1296. if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
  1297. tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
  1298. return -EINVAL;
  1299. /*
  1300. * If the target inode has fewer extents than the temporary inode then
  1301. * why did userspace call us?
  1302. */
  1303. if (ip->i_d.di_nextents < tip->i_d.di_nextents)
  1304. return -EINVAL;
  1305. /*
  1306. * If we have to use the (expensive) rmap swap method, we can
  1307. * handle any number of extents and any format.
  1308. */
  1309. if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
  1310. return 0;
  1311. /*
  1312. * if the target inode is in extent form and the temp inode is in btree
  1313. * form then we will end up with the target inode in the wrong format
  1314. * as we already know there are fewer extents in the temp inode.
  1315. */
  1316. if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
  1317. tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
  1318. return -EINVAL;
  1319. /* Check temp in extent form to max in target */
  1320. if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
  1321. XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
  1322. XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
  1323. return -EINVAL;
  1324. /* Check target in extent form to max in temp */
  1325. if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
  1326. XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
  1327. XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
  1328. return -EINVAL;
  1329. /*
  1330. * If we are in a btree format, check that the temp root block will fit
  1331. * in the target and that it has enough extents to be in btree format
  1332. * in the target.
  1333. *
  1334. * Note that we have to be careful to allow btree->extent conversions
  1335. * (a common defrag case) which will occur when the temp inode is in
  1336. * extent format...
  1337. */
  1338. if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
  1339. if (XFS_IFORK_Q(ip) &&
  1340. XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
  1341. return -EINVAL;
  1342. if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
  1343. XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
  1344. return -EINVAL;
  1345. }
  1346. /* Reciprocal target->temp btree format checks */
  1347. if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
  1348. if (XFS_IFORK_Q(tip) &&
  1349. XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
  1350. return -EINVAL;
  1351. if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
  1352. XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
  1353. return -EINVAL;
  1354. }
  1355. return 0;
  1356. }
  1357. static int
  1358. xfs_swap_extent_flush(
  1359. struct xfs_inode *ip)
  1360. {
  1361. int error;
  1362. error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
  1363. if (error)
  1364. return error;
  1365. truncate_pagecache_range(VFS_I(ip), 0, -1);
  1366. /* Verify O_DIRECT for ftmp */
  1367. if (VFS_I(ip)->i_mapping->nrpages)
  1368. return -EINVAL;
  1369. return 0;
  1370. }
  1371. /*
  1372. * Move extents from one file to another, when rmap is enabled.
  1373. */
  1374. STATIC int
  1375. xfs_swap_extent_rmap(
  1376. struct xfs_trans **tpp,
  1377. struct xfs_inode *ip,
  1378. struct xfs_inode *tip)
  1379. {
  1380. struct xfs_bmbt_irec irec;
  1381. struct xfs_bmbt_irec uirec;
  1382. struct xfs_bmbt_irec tirec;
  1383. xfs_fileoff_t offset_fsb;
  1384. xfs_fileoff_t end_fsb;
  1385. xfs_filblks_t count_fsb;
  1386. xfs_fsblock_t firstfsb;
  1387. struct xfs_defer_ops dfops;
  1388. int error;
  1389. xfs_filblks_t ilen;
  1390. xfs_filblks_t rlen;
  1391. int nimaps;
  1392. uint64_t tip_flags2;
  1393. /*
  1394. * If the source file has shared blocks, we must flag the donor
  1395. * file as having shared blocks so that we get the shared-block
  1396. * rmap functions when we go to fix up the rmaps. The flags
  1397. * will be switched for real later.
  1398. */
  1399. tip_flags2 = tip->i_d.di_flags2;
  1400. if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
  1401. tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
  1402. offset_fsb = 0;
  1403. end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
  1404. count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
	while (count_fsb) {
		/* Read extent from the donor file */
		nimaps = 1;
		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
				&nimaps, 0);
		if (error)
			goto out;
		ASSERT(nimaps == 1);
		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
		ilen = tirec.br_blockcount;

		/* Unmap the old blocks in the source file. */
		while (tirec.br_blockcount) {
			xfs_defer_init(&dfops, &firstfsb);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

			/* Read extent from the source file */
			nimaps = 1;
			error = xfs_bmapi_read(ip, tirec.br_startoff,
					tirec.br_blockcount, &irec,
					&nimaps, 0);
			if (error)
				goto out_defer;
			ASSERT(nimaps == 1);
			ASSERT(tirec.br_startoff == irec.br_startoff);
			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

			/* Trim the extent. */
			uirec = tirec;
			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
					tirec.br_blockcount,
					irec.br_blockcount);
			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

			/* Remove the mapping from the donor file. */
			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
					tip, &uirec);
			if (error)
				goto out_defer;

			/* Remove the mapping from the source file. */
			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
					ip, &irec);
			if (error)
				goto out_defer;

			/* Map the donor file's blocks into the source file. */
			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
					ip, &uirec);
			if (error)
				goto out_defer;

			/* Map the source file's blocks into the donor file. */
			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
					tip, &irec);
			if (error)
				goto out_defer;

			xfs_defer_ijoin(&dfops, ip);
			error = xfs_defer_finish(tpp, &dfops);
			if (error)
				goto out_defer;

			tirec.br_startoff += rlen;
			if (tirec.br_startblock != HOLESTARTBLOCK &&
			    tirec.br_startblock != DELAYSTARTBLOCK)
				tirec.br_startblock += rlen;
			tirec.br_blockcount -= rlen;
		}

		/* Roll on... */
		count_fsb -= ilen;
		offset_fsb += ilen;
	}
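
	/*
	 * Restore the donor's original di_flags2; the reflink flags are
	 * swapped for real by the caller once the remap has completed.
	 */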
	tip->i_d.di_flags2 = tip_flags2;
	return 0;

out_defer:
	xfs_defer_cancel(&dfops);
out:
	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
	tip->i_d.di_flags2 = tip_flags2;
	return error;
}

/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tip,
	int			*src_log_flags,
	int			*target_log_flags)
{
	struct xfs_ifork	tempifp, *ifp, *tifp;
	xfs_filblks_t		aforkblks = 0;
	xfs_filblks_t		taforkblks = 0;
	xfs_extnum_t		junk;
	uint64_t		tmp;
	int			error;

	/*
	 * Count the number of extended attribute blocks. The attribute forks
	 * are not swapped, so these counts are needed below to adjust
	 * di_nblocks for the data forks alone.
	 */
	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
				&aforkblks);
		if (error)
			return error;
	}
	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
				&taforkblks);
		if (error)
			return error;
	}

	/*
	 * Btree format (v3) inodes have the inode number stamped in the bmbt
	 * block headers. We can't start changing the bmbt blocks until the
	 * inode owner change is logged so recovery does the right thing in the
	 * event of a crash. Set the owner change log flags now and leave the
	 * bmbt scan as the last step.
	 */
	if (ip->i_d.di_version == 3 &&
	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		(*target_log_flags) |= XFS_ILOG_DOWNER;
	if (tip->i_d.di_version == 3 &&
	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
		(*src_log_flags) |= XFS_ILOG_DOWNER;

	/*
	 * Swap the data forks of the inodes
	 */
	ifp = &ip->i_df;
	tifp = &tip->i_df;
	tempifp = *ifp;		/* struct copy */
	*ifp = *tifp;		/* struct copy */
	*tifp = tempifp;	/* struct copy */

	/*
	 * Fix the on-disk inode values
	 */
	tmp = (uint64_t)ip->i_d.di_nblocks;
	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
	tip->i_d.di_nblocks = tmp + taforkblks - aforkblks;

	tmp = (uint64_t) ip->i_d.di_nextents;
	ip->i_d.di_nextents = tip->i_d.di_nextents;
	tip->i_d.di_nextents = tmp;

	tmp = (uint64_t) ip->i_d.di_format;
	ip->i_d.di_format = tip->i_d.di_format;
	tip->i_d.di_format = tmp;

	/*
	 * The extents in the source inode could still contain speculative
	 * preallocation beyond EOF (e.g. the file is open but not modified
	 * while defrag is in progress). In that case, we need to copy over the
	 * number of delalloc blocks the data fork in the source inode is
	 * tracking beyond EOF so that, when the fork is truncated away as the
	 * temporary inode is unlinked, we don't underrun the i_delayed_blks
	 * counter on that inode.
	 */
	ASSERT(tip->i_delayed_blks == 0);
	tip->i_delayed_blks = ip->i_delayed_blks;
	ip->i_delayed_blks = 0;
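
	/*
	 * Each inode now carries the other's data fork, so log the fork in
	 * its new format: extent lists via XFS_ILOG_DEXT, btree roots via
	 * XFS_ILOG_DBROOT.
	 */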
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*src_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		ASSERT(ip->i_d.di_version < 3 ||
		       (*src_log_flags & XFS_ILOG_DOWNER));
		(*src_log_flags) |= XFS_ILOG_DBROOT;
		break;
	}

	switch (tip->i_d.di_format) {
	case XFS_DINODE_FMT_EXTENTS:
		(*target_log_flags) |= XFS_ILOG_DEXT;
		break;
	case XFS_DINODE_FMT_BTREE:
		(*target_log_flags) |= XFS_ILOG_DBROOT;
		ASSERT(tip->i_d.di_version < 3 ||
		       (*target_log_flags & XFS_ILOG_DOWNER));
		break;
	}

	return 0;
}

/*
 * Fix up the owners of the bmbt blocks to refer to the current inode. The
 * change owner scan attempts to order all modified buffers in the current
 * transaction. In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN. We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan. This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	struct xfs_inode	*tmpip)
{
	int			error;
	struct xfs_trans	*tp = *tpp;

	do {
		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
					      NULL);
		/* success or fatal error */
		if (error != -EAGAIN)
			break;

		error = xfs_trans_roll(tpp);
		if (error)
			break;
		tp = *tpp;

		/*
		 * Redirty both inodes so they can relog and keep the log tail
		 * moving forward.
		 */
		xfs_trans_ijoin(tp, ip, 0);
		xfs_trans_ijoin(tp, tmpip, 0);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
	} while (true);

	return error;
}
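
/*
 * Swap the data forks of the target inode @ip and the temporary inode @tip,
 * along with their block counts, extent counts, CoW forks and reflink flags.
 * This is the implementation behind the XFS_IOC_SWAPEXT ioctl used by
 * utilities such as xfs_fsr for online defragmentation.
 */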
int
xfs_swap_extents(
	struct xfs_inode	*ip,	/* target inode */
	struct xfs_inode	*tip,	/* tmp inode */
	struct xfs_swapext	*sxp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_bstat	*sbp = &sxp->sx_stat;
	int			src_log_flags, target_log_flags;
	int			error = 0;
	int			lock_flags;
	struct xfs_ifork	*cowfp;
	uint64_t		f;
	int			resblks = 0;

	/*
	 * Lock the inodes against other IO, page faults and truncate to
	 * begin with. Then we can safely ensure the inodes are flushed and
	 * have no page cache. Once we have done this we can take the ilocks
	 * and do the rest of the checks.
	 */
	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	lock_flags = XFS_MMAPLOCK_EXCL;
	xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);

	/* Verify that both files have the same format */
	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
		error = -EINVAL;
		goto out_unlock;
	}

	/* Verify both files are either real-time or non-realtime */
	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
		error = -EINVAL;
		goto out_unlock;
	}

	error = xfs_swap_extent_flush(ip);
	if (error)
		goto out_unlock;
	error = xfs_swap_extent_flush(tip);
	if (error)
		goto out_unlock;

	/*
	 * Extent "swapping" with rmap requires a permanent reservation and
	 * a block reservation because it's really just a remap operation
	 * performed with log redo items!
	 */
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		int		w = XFS_DATA_FORK;
		uint32_t	ipnext = XFS_IFORK_NEXTENTS(ip, w);
		uint32_t	tipnext	= XFS_IFORK_NEXTENTS(tip, w);

		/*
		 * Conceptually this shouldn't affect the shape of either bmbt,
		 * but since we atomically move extents one by one, we reserve
		 * enough space to rebuild both trees.
		 */
		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

		/*
		 * Handle the corner case where either inode might straddle the
		 * btree format boundary. If so, the inode could bounce between
		 * btree <-> extent format on unmap -> remap cycles, freeing and
		 * allocating a bmapbt block each time.
		 */
		if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1))
			resblks += XFS_IFORK_MAXEXT(ip, w);
		if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1))
			resblks += XFS_IFORK_MAXEXT(tip, w);
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
	if (error)
		goto out_unlock;

	/*
	 * Lock and join the inodes to the transaction so that transaction
	 * commit or cancel will unlock the inodes from this point onwards.
	 */
	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
	lock_flags |= XFS_ILOCK_EXCL;
	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_ijoin(tp, tip, 0);

	/* Verify all data are being swapped */
	if (sxp->sx_offset != 0 ||
	    sxp->sx_length != ip->i_d.di_size ||
	    sxp->sx_length != tip->i_d.di_size) {
		error = -EFAULT;
		goto out_trans_cancel;
	}

	trace_xfs_swap_extent_before(ip, 0);
	trace_xfs_swap_extent_before(tip, 1);

	/* check inode formats now that data is flushed */
	error = xfs_swap_extents_check_format(ip, tip);
	if (error) {
		xfs_notice(mp,
		    "%s: inode 0x%llx format is incompatible for exchanging.",
				__func__, ip->i_ino);
		goto out_trans_cancel;
	}

	/*
	 * Compare the current change & modify times with that
	 * passed in. If they differ, we abort this swap.
	 * This is the mechanism used to assure the calling
	 * process that the file was not changed out from
	 * under it.
	 */
	if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
	    (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
	    (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
	    (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
		error = -EBUSY;
		goto out_trans_cancel;
	}

	/*
	 * Note the trickiness in setting the log flags - we set the owner log
	 * flag on the opposite inode (i.e. the inode we are setting the new
	 * owner to be) because once we swap the forks and log that, log
	 * recovery is going to see the fork as owned by the swapped inode,
	 * not the pre-swapped inodes.
	 */
	src_log_flags = XFS_ILOG_CORE;
	target_log_flags = XFS_ILOG_CORE;

	if (xfs_sb_version_hasrmapbt(&mp->m_sb))
		error = xfs_swap_extent_rmap(&tp, ip, tip);
	else
		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
				&target_log_flags);
	if (error)
		goto out_trans_cancel;

	/* Do we have to swap reflink flags? */
	if ((ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK) ^
	    (tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)) {
		f = ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
		tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK;
	}

	/* Swap the cow forks. */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		xfs_extnum_t	extnum;

		ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS);
		ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS);

		extnum = ip->i_cnextents;
		ip->i_cnextents = tip->i_cnextents;
		tip->i_cnextents = extnum;

		cowfp = ip->i_cowfp;
		ip->i_cowfp = tip->i_cowfp;
		tip->i_cowfp = cowfp;

		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(ip);
		else
			xfs_inode_clear_cowblocks_tag(ip);
		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
			xfs_inode_set_cowblocks_tag(tip);
		else
			xfs_inode_clear_cowblocks_tag(tip);
	}

	xfs_trans_log_inode(tp, ip, src_log_flags);
	xfs_trans_log_inode(tp, tip, target_log_flags);

	/*
	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
	 * have inode number owner values in the bmbt blocks that still refer to
	 * the old inode. Scan each bmbt to fix up the owner values with the
	 * inode number of the current inode.
	 */
	if (src_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, ip, tip);
		if (error)
			goto out_trans_cancel;
	}
	if (target_log_flags & XFS_ILOG_DOWNER) {
		error = xfs_swap_change_owner(&tp, tip, ip);
		if (error)
			goto out_trans_cancel;
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * transaction goes to disk before returning to the user.
	 */
	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);

	trace_xfs_swap_extent_after(ip, 0);
	trace_xfs_swap_extent_after(tip, 1);

out_unlock:
	xfs_iunlock(ip, lock_flags);
	xfs_iunlock(tip, lock_flags);
	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}
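
/*
 * Illustrative sketch (not part of this file): userspace reaches
 * xfs_swap_extents() through the XFS_IOC_SWAPEXT ioctl, roughly the way
 * xfs_fsr drives a defragmentation pass. The file descriptors and bulkstat
 * data below are assumptions for the example; the bulkstat data must
 * describe the target file so the ctime/mtime checks above succeed.
 *
 *	struct xfs_swapext	sx = { 0 };
 *
 *	sx.sx_version = XFS_SX_VERSION;
 *	sx.sx_fdtarget = target_fd;	// file being defragmented
 *	sx.sx_fdtmp = tmp_fd;		// freshly laid out temporary file
 *	sx.sx_offset = 0;		// must cover the whole file
 *	sx.sx_length = bstat.bs_size;
 *	sx.sx_stat = bstat;		// from a prior bulkstat of target_fd
 *	error = ioctl(target_fd, XFS_IOC_SWAPEXT, &sx);
 */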