xfs_refcount.c 46 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711
  1. /*
  2. * Copyright (C) 2016 Oracle. All Rights Reserved.
  3. *
  4. * Author: Darrick J. Wong <darrick.wong@oracle.com>
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version 2
  9. * of the License, or (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it would be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write the Free Software Foundation,
  18. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
  19. */
  20. #include "xfs.h"
  21. #include "xfs_fs.h"
  22. #include "xfs_shared.h"
  23. #include "xfs_format.h"
  24. #include "xfs_log_format.h"
  25. #include "xfs_trans_resv.h"
  26. #include "xfs_sb.h"
  27. #include "xfs_mount.h"
  28. #include "xfs_defer.h"
  29. #include "xfs_btree.h"
  30. #include "xfs_bmap.h"
  31. #include "xfs_refcount_btree.h"
  32. #include "xfs_alloc.h"
  33. #include "xfs_errortag.h"
  34. #include "xfs_error.h"
  35. #include "xfs_trace.h"
  36. #include "xfs_cksum.h"
  37. #include "xfs_trans.h"
  38. #include "xfs_bit.h"
  39. #include "xfs_refcount.h"
  40. #include "xfs_rmap.h"
/* Allowable refcount adjustment amounts. */
enum xfs_refc_adjust_op {
	XFS_REFCOUNT_ADJUST_INCREASE	= 1,	/* raise refcount by one */
	XFS_REFCOUNT_ADJUST_DECREASE	= -1,	/* lower refcount by one */
	XFS_REFCOUNT_ADJUST_COW_ALLOC	= 0,	/* stage a CoW record; no refcount delta */
	XFS_REFCOUNT_ADJUST_COW_FREE	= -1,	/* remove a staged CoW record */
};
  48. STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
  49. xfs_agblock_t agbno, xfs_extlen_t aglen,
  50. struct xfs_defer_ops *dfops);
  51. STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
  52. xfs_agblock_t agbno, xfs_extlen_t aglen,
  53. struct xfs_defer_ops *dfops);
  54. /*
  55. * Look up the first record less than or equal to [bno, len] in the btree
  56. * given by cur.
  57. */
  58. int
  59. xfs_refcount_lookup_le(
  60. struct xfs_btree_cur *cur,
  61. xfs_agblock_t bno,
  62. int *stat)
  63. {
  64. trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
  65. XFS_LOOKUP_LE);
  66. cur->bc_rec.rc.rc_startblock = bno;
  67. cur->bc_rec.rc.rc_blockcount = 0;
  68. return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
  69. }
  70. /*
  71. * Look up the first record greater than or equal to [bno, len] in the btree
  72. * given by cur.
  73. */
  74. int
  75. xfs_refcount_lookup_ge(
  76. struct xfs_btree_cur *cur,
  77. xfs_agblock_t bno,
  78. int *stat)
  79. {
  80. trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
  81. XFS_LOOKUP_GE);
  82. cur->bc_rec.rc.rc_startblock = bno;
  83. cur->bc_rec.rc.rc_blockcount = 0;
  84. return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
  85. }
  86. /* Convert on-disk record to in-core format. */
  87. static inline void
  88. xfs_refcount_btrec_to_irec(
  89. union xfs_btree_rec *rec,
  90. struct xfs_refcount_irec *irec)
  91. {
  92. irec->rc_startblock = be32_to_cpu(rec->refc.rc_startblock);
  93. irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount);
  94. irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount);
  95. }
  96. /*
  97. * Get the data from the pointed-to record.
  98. */
  99. int
  100. xfs_refcount_get_rec(
  101. struct xfs_btree_cur *cur,
  102. struct xfs_refcount_irec *irec,
  103. int *stat)
  104. {
  105. union xfs_btree_rec *rec;
  106. int error;
  107. error = xfs_btree_get_rec(cur, &rec, stat);
  108. if (!error && *stat == 1) {
  109. xfs_refcount_btrec_to_irec(rec, irec);
  110. trace_xfs_refcount_get(cur->bc_mp, cur->bc_private.a.agno,
  111. irec);
  112. }
  113. return error;
  114. }
  115. /*
  116. * Update the record referred to by cur to the value given
  117. * by [bno, len, refcount].
  118. * This either works (return 0) or gets an EFSCORRUPTED error.
  119. */
  120. STATIC int
  121. xfs_refcount_update(
  122. struct xfs_btree_cur *cur,
  123. struct xfs_refcount_irec *irec)
  124. {
  125. union xfs_btree_rec rec;
  126. int error;
  127. trace_xfs_refcount_update(cur->bc_mp, cur->bc_private.a.agno, irec);
  128. rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock);
  129. rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
  130. rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
  131. error = xfs_btree_update(cur, &rec);
  132. if (error)
  133. trace_xfs_refcount_update_error(cur->bc_mp,
  134. cur->bc_private.a.agno, error, _RET_IP_);
  135. return error;
  136. }
  137. /*
  138. * Insert the record referred to by cur to the value given
  139. * by [bno, len, refcount].
  140. * This either works (return 0) or gets an EFSCORRUPTED error.
  141. */
  142. STATIC int
  143. xfs_refcount_insert(
  144. struct xfs_btree_cur *cur,
  145. struct xfs_refcount_irec *irec,
  146. int *i)
  147. {
  148. int error;
  149. trace_xfs_refcount_insert(cur->bc_mp, cur->bc_private.a.agno, irec);
  150. cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
  151. cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
  152. cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
  153. error = xfs_btree_insert(cur, i);
  154. XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
  155. out_error:
  156. if (error)
  157. trace_xfs_refcount_insert_error(cur->bc_mp,
  158. cur->bc_private.a.agno, error, _RET_IP_);
  159. return error;
  160. }
  161. /*
  162. * Remove the record referred to by cur, then set the pointer to the spot
  163. * where the record could be re-inserted, in case we want to increment or
  164. * decrement the cursor.
  165. * This either works (return 0) or gets an EFSCORRUPTED error.
  166. */
  167. STATIC int
  168. xfs_refcount_delete(
  169. struct xfs_btree_cur *cur,
  170. int *i)
  171. {
  172. struct xfs_refcount_irec irec;
  173. int found_rec;
  174. int error;
  175. error = xfs_refcount_get_rec(cur, &irec, &found_rec);
  176. if (error)
  177. goto out_error;
  178. XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
  179. trace_xfs_refcount_delete(cur->bc_mp, cur->bc_private.a.agno, &irec);
  180. error = xfs_btree_delete(cur, i);
  181. XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
  182. if (error)
  183. goto out_error;
  184. error = xfs_refcount_lookup_ge(cur, irec.rc_startblock, &found_rec);
  185. out_error:
  186. if (error)
  187. trace_xfs_refcount_delete_error(cur->bc_mp,
  188. cur->bc_private.a.agno, error, _RET_IP_);
  189. return error;
  190. }
  191. /*
  192. * Adjusting the Reference Count
  193. *
  194. * As stated elsewhere, the reference count btree (refcbt) stores
  195. * >1 reference counts for extents of physical blocks. In this
  196. * operation, we're either raising or lowering the reference count of
  197. * some subrange stored in the tree:
  198. *
  199. * <------ adjustment range ------>
  200. * ----+ +---+-----+ +--+--------+---------
  201. * 2 | | 3 | 4 | |17| 55 | 10
  202. * ----+ +---+-----+ +--+--------+---------
  203. * X axis is physical blocks number;
  204. * reference counts are the numbers inside the rectangles
  205. *
  206. * The first thing we need to do is to ensure that there are no
  207. * refcount extents crossing either boundary of the range to be
  208. * adjusted. For any extent that does cross a boundary, split it into
  209. * two extents so that we can increment the refcount of one of the
  210. * pieces later:
  211. *
  212. * <------ adjustment range ------>
  213. * ----+ +---+-----+ +--+--------+----+----
  214. * 2 | | 3 | 2 | |17| 55 | 10 | 10
  215. * ----+ +---+-----+ +--+--------+----+----
  216. *
  217. * For this next step, let's assume that all the physical blocks in
  218. * the adjustment range are mapped to a file and are therefore in use
  219. * at least once. Therefore, we can infer that any gap in the
  220. * refcount tree within the adjustment range represents a physical
  221. * extent with refcount == 1:
  222. *
  223. * <------ adjustment range ------>
  224. * ----+---+---+-----+-+--+--------+----+----
  225. * 2 |"1"| 3 | 2 |1|17| 55 | 10 | 10
  226. * ----+---+---+-----+-+--+--------+----+----
  227. * ^
  228. *
  229. * For each extent that falls within the interval range, figure out
  230. * which extent is to the left or the right of that extent. Now we
  231. * have a left, current, and right extent. If the new reference count
  232. * of the center extent enables us to merge left, center, and right
  233. * into one record covering all three, do so. If the center extent is
  234. * at the left end of the range, abuts the left extent, and its new
  235. * reference count matches the left extent's record, then merge them.
  236. * If the center extent is at the right end of the range, abuts the
  237. * right extent, and the reference counts match, merge those. In the
  238. * example, we can left merge (assuming an increment operation):
  239. *
  240. * <------ adjustment range ------>
  241. * --------+---+-----+-+--+--------+----+----
  242. * 2 | 3 | 2 |1|17| 55 | 10 | 10
  243. * --------+---+-----+-+--+--------+----+----
  244. * ^
  245. *
  246. * For all other extents within the range, adjust the reference count
  247. * or delete it if the refcount falls below 2. If we were
  248. * incrementing, the end result looks like this:
  249. *
  250. * <------ adjustment range ------>
  251. * --------+---+-----+-+--+--------+----+----
  252. * 2 | 4 | 3 |2|18| 56 | 11 | 10
  253. * --------+---+-----+-+--+--------+----+----
  254. *
  255. * The result of a decrement operation looks as such:
  256. *
  257. * <------ adjustment range ------>
  258. * ----+ +---+ +--+--------+----+----
  259. * 2 | | 2 | |16| 54 | 9 | 10
  260. * ----+ +---+ +--+--------+----+----
  261. * DDDD 111111DD
  262. *
  263. * The blocks marked "D" are freed; the blocks marked "1" are only
  264. * referenced once and therefore the record is removed from the
  265. * refcount btree.
  266. */
  267. /* Next block after this extent. */
  268. static inline xfs_agblock_t
  269. xfs_refc_next(
  270. struct xfs_refcount_irec *rc)
  271. {
  272. return rc->rc_startblock + rc->rc_blockcount;
  273. }
/*
 * Split a refcount extent that crosses agbno.
 *
 * If the record containing agbno starts strictly before agbno, replace
 * it with two records: the original record shortened to end at agbno
 * (the "left" piece), and a new record starting at agbno with the same
 * refcount (the "right" piece).  *shape_changed reports whether the
 * tree was modified.
 */
STATIC int
xfs_refcount_split_extent(
	struct xfs_btree_cur		*cur,
	xfs_agblock_t			agbno,
	bool				*shape_changed)
{
	struct xfs_refcount_irec	rcext, tmp;
	int				found_rec;
	int				error;

	*shape_changed = false;
	error = xfs_refcount_lookup_le(cur, agbno, &found_rec);
	if (error)
		goto out_error;
	/* No record at or before agbno; nothing can cross it. */
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &rcext, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
	/*
	 * If the record starts exactly at agbno or ends at/before it,
	 * nothing actually crosses the boundary; no split needed.
	 */
	if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno)
		return 0;

	*shape_changed = true;
	trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_private.a.agno,
			&rcext, agbno);

	/* Establish the right extent. */
	tmp = rcext;
	tmp.rc_startblock = agbno;
	tmp.rc_blockcount -= (agbno - rcext.rc_startblock);
	/* Rewrite the existing record in place as the right piece... */
	error = xfs_refcount_update(cur, &tmp);
	if (error)
		goto out_error;

	/* Insert the left extent. */
	tmp = rcext;
	tmp.rc_blockcount = agbno - rcext.rc_startblock;
	/* ...and insert a fresh record for the left piece. */
	error = xfs_refcount_insert(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
	return error;

out_error:
	trace_xfs_refcount_split_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}
/*
 * Merge the left, center, and right extents.
 *
 * The three extents all carry the same (post-adjustment) refcount, so
 * they can be represented by a single record: delete the center and
 * right records and grow the left record to cover extlen blocks.  On
 * success *aglen is zeroed so the caller knows the whole adjustment
 * range has been handled.
 */
STATIC int
xfs_refcount_merge_center_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*center,
	struct xfs_refcount_irec	*right,
	unsigned long long		extlen,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_center_extents(cur->bc_mp,
			cur->bc_private.a.agno, left, center, right);

	/*
	 * Make sure the center and right extents are not in the btree.
	 * If the center extent was synthesized, the first delete call
	 * removes the right extent and we skip the second deletion.
	 * If center and right were in the btree, then the first delete
	 * call removes the center and the second one removes the right
	 * extent.
	 */
	error = xfs_refcount_lookup_ge(cur, center->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	error = xfs_refcount_delete(cur, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	/* refcount > 1 means the center record really was in the btree. */
	if (center->rc_refcount > 1) {
		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);
	}

	/* Enlarge the left extent. */
	error = xfs_refcount_lookup_le(cur, left->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	left->rc_blockcount = extlen;
	error = xfs_refcount_update(cur, left);
	if (error)
		goto out_error;

	/* Everything merged; no middle range left to adjust. */
	*aglen = 0;
	return error;

out_error:
	trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}
/*
 * Merge with the left extent.
 *
 * Absorb cleft (the extent at the start of the adjustment range) into
 * left (the extent just before it): delete cleft's record if it was a
 * real btree record, grow left to cover it, and advance *agbno/*aglen
 * past the merged blocks.
 */
STATIC int
xfs_refcount_merge_left_extent(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*cleft,
	xfs_agblock_t			*agbno,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_left_extent(cur->bc_mp,
			cur->bc_private.a.agno, left, cleft);

	/* If the extent at agbno (cleft) wasn't synthesized, remove it. */
	if (cleft->rc_refcount > 1) {
		error = xfs_refcount_lookup_le(cur, cleft->rc_startblock,
				&found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);

		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);
	}

	/* Enlarge the left extent. */
	error = xfs_refcount_lookup_le(cur, left->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	left->rc_blockcount += cleft->rc_blockcount;
	error = xfs_refcount_update(cur, left);
	if (error)
		goto out_error;

	/* Shrink the adjustment range past the blocks we just merged. */
	*agbno += cleft->rc_blockcount;
	*aglen -= cleft->rc_blockcount;
	return error;

out_error:
	trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}
/*
 * Merge with the right extent.
 *
 * Absorb cright (the extent at the end of the adjustment range) into
 * right (the extent just after it): delete cright's record if it was a
 * real btree record, extend right leftwards to cover it, and shrink
 * *aglen by the merged length.
 */
STATIC int
xfs_refcount_merge_right_extent(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*right,
	struct xfs_refcount_irec	*cright,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_right_extent(cur->bc_mp,
			cur->bc_private.a.agno, cright, right);

	/*
	 * If the extent ending at agbno+aglen (cright) wasn't synthesized,
	 * remove it.
	 */
	if (cright->rc_refcount > 1) {
		error = xfs_refcount_lookup_le(cur, cright->rc_startblock,
				&found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);

		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);
	}

	/* Enlarge the right extent. */
	error = xfs_refcount_lookup_le(cur, right->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	/* Grow right leftwards so it starts where cright started. */
	right->rc_startblock -= cright->rc_blockcount;
	right->rc_blockcount += cright->rc_blockcount;
	error = xfs_refcount_update(cur, right);
	if (error)
		goto out_error;

	*aglen -= cright->rc_blockcount;
	return error;

out_error:
	trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}
/* Flags for the find_*_extents helpers: which refcounts qualify. */
#define XFS_FIND_RCEXT_SHARED	1	/* only consider shared (refcount >= 2) extents */
#define XFS_FIND_RCEXT_COW	2	/* only consider CoW staging (refcount == 1) extents */
/*
 * Find the left extent and the one after it (cleft). This function assumes
 * that we've already split any extent crossing agbno.
 *
 * On return, left/cleft have rc_startblock == NULLAGBLOCK if no suitable
 * left extent exists.  cleft may be synthesized (rc_refcount == 1) when
 * the refcount btree has a gap at agbno.
 */
STATIC int
xfs_refcount_find_left_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*cleft,
	xfs_agblock_t			agbno,
	xfs_extlen_t			aglen,
	int				flags)
{
	struct xfs_refcount_irec	tmp;
	int				error;
	int				found_rec;

	/* Mark both results invalid until proven otherwise. */
	left->rc_startblock = cleft->rc_startblock = NULLAGBLOCK;
	error = xfs_refcount_lookup_le(cur, agbno - 1, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	/* The candidate must end exactly at agbno to abut our range. */
	if (xfs_refc_next(&tmp) != agbno)
		return 0;
	/* Filter by mode: shared extents need refcount >= 2, CoW need == 1. */
	if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
		return 0;
	if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
		return 0;
	/* We have a left extent; retrieve (or invent) the next right one */
	*left = tmp;

	error = xfs_btree_increment(cur, 0, &found_rec);
	if (error)
		goto out_error;
	if (found_rec) {
		error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);

		/* if tmp starts at the end of our range, just use that */
		if (tmp.rc_startblock == agbno)
			*cleft = tmp;
		else {
			/*
			 * There's a gap in the refcntbt at the start of the
			 * range we're interested in (refcount == 1) so
			 * synthesize the implied extent and pass it back.
			 * We assume here that the agbno/aglen range was
			 * passed in from a data fork extent mapping and
			 * therefore is allocated to exactly one owner.
			 */
			cleft->rc_startblock = agbno;
			cleft->rc_blockcount = min(aglen,
					tmp.rc_startblock - agbno);
			cleft->rc_refcount = 1;
		}
	} else {
		/*
		 * No extents, so pretend that there's one covering the whole
		 * range.
		 */
		cleft->rc_startblock = agbno;
		cleft->rc_blockcount = aglen;
		cleft->rc_refcount = 1;
	}
	trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_private.a.agno,
			left, cleft, agbno);
	return error;

out_error:
	trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}
/*
 * Find the right extent and the one before it (cright). This function
 * assumes that we've already split any extents crossing agbno + aglen.
 *
 * On return, right/cright have rc_startblock == NULLAGBLOCK if no
 * suitable right extent exists.  cright may be synthesized
 * (rc_refcount == 1) when the refcount btree has a gap just before
 * agbno + aglen.
 */
STATIC int
xfs_refcount_find_right_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*right,
	struct xfs_refcount_irec	*cright,
	xfs_agblock_t			agbno,
	xfs_extlen_t			aglen,
	int				flags)
{
	struct xfs_refcount_irec	tmp;
	int				error;
	int				found_rec;

	/* Mark both results invalid until proven otherwise. */
	right->rc_startblock = cright->rc_startblock = NULLAGBLOCK;
	error = xfs_refcount_lookup_ge(cur, agbno + aglen, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	/* The candidate must start exactly at agbno+aglen to abut our range. */
	if (tmp.rc_startblock != agbno + aglen)
		return 0;
	/* Filter by mode: shared extents need refcount >= 2, CoW need == 1. */
	if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
		return 0;
	if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
		return 0;
	/* We have a right extent; retrieve (or invent) the next left one */
	*right = tmp;

	error = xfs_btree_decrement(cur, 0, &found_rec);
	if (error)
		goto out_error;
	if (found_rec) {
		error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);

		/* if tmp ends at the end of our range, just use that */
		if (xfs_refc_next(&tmp) == agbno + aglen)
			*cright = tmp;
		else {
			/*
			 * There's a gap in the refcntbt at the end of the
			 * range we're interested in (refcount == 1) so
			 * create the implied extent and pass it back.
			 * We assume here that the agbno/aglen range was
			 * passed in from a data fork extent mapping and
			 * therefore is allocated to exactly one owner.
			 */
			cright->rc_startblock = max(agbno, xfs_refc_next(&tmp));
			cright->rc_blockcount = right->rc_startblock -
					cright->rc_startblock;
			cright->rc_refcount = 1;
		}
	} else {
		/*
		 * No extents, so pretend that there's one covering the whole
		 * range.
		 */
		cright->rc_startblock = agbno;
		cright->rc_blockcount = aglen;
		cright->rc_refcount = 1;
	}
	trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_private.a.agno,
			cright, right, agbno + aglen);
	return error;

out_error:
	trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}
  630. /* Is this extent valid? */
  631. static inline bool
  632. xfs_refc_valid(
  633. struct xfs_refcount_irec *rc)
  634. {
  635. return rc->rc_startblock != NULLAGBLOCK;
  636. }
/*
 * Try to merge with any extents on the boundaries of the adjustment range.
 *
 * Attempts, in order: a three-way merge of left + center + right; a
 * left + cleft merge; and a cright + right merge.  *agbno/*aglen are
 * updated to exclude any blocks consumed by a merge, and
 * *shape_changed reports whether the btree was modified.  Merged
 * lengths are computed in unsigned long long and checked against
 * MAXREFCEXTLEN so a record's blockcount cannot overflow.
 */
STATIC int
xfs_refcount_merge_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		*agbno,
	xfs_extlen_t		*aglen,
	enum xfs_refc_adjust_op adjust,
	int			flags,
	bool			*shape_changed)
{
	struct xfs_refcount_irec	left = {0}, cleft = {0};
	struct xfs_refcount_irec	cright = {0}, right = {0};
	int				error;
	unsigned long long		ulen;
	bool				cequal;

	*shape_changed = false;
	/*
	 * Find the extent just below agbno [left], just above agbno [cleft],
	 * just below (agbno + aglen) [cright], and just above (agbno + aglen)
	 * [right].
	 */
	error = xfs_refcount_find_left_extents(cur, &left, &cleft, *agbno,
			*aglen, flags);
	if (error)
		return error;
	error = xfs_refcount_find_right_extents(cur, &right, &cright, *agbno,
			*aglen, flags);
	if (error)
		return error;

	/* No left or right extent to merge; exit. */
	if (!xfs_refc_valid(&left) && !xfs_refc_valid(&right))
		return 0;

	/* cleft == cright means the range is covered by one center extent. */
	cequal = (cleft.rc_startblock == cright.rc_startblock) &&
		 (cleft.rc_blockcount == cright.rc_blockcount);

	/* Try to merge left, cleft, and right.  cleft must == cright. */
	ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount +
			right.rc_blockcount;
	if (xfs_refc_valid(&left) && xfs_refc_valid(&right) &&
	    xfs_refc_valid(&cleft) && xfs_refc_valid(&cright) && cequal &&
	    left.rc_refcount == cleft.rc_refcount + adjust &&
	    right.rc_refcount == cleft.rc_refcount + adjust &&
	    ulen < MAXREFCEXTLEN) {
		*shape_changed = true;
		return xfs_refcount_merge_center_extents(cur, &left, &cleft,
				&right, ulen, aglen);
	}

	/* Try to merge left and cleft. */
	ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount;
	if (xfs_refc_valid(&left) && xfs_refc_valid(&cleft) &&
	    left.rc_refcount == cleft.rc_refcount + adjust &&
	    ulen < MAXREFCEXTLEN) {
		*shape_changed = true;
		error = xfs_refcount_merge_left_extent(cur, &left, &cleft,
				agbno, aglen);
		if (error)
			return error;

		/*
		 * If we just merged left + cleft and cleft == cright,
		 * we no longer have a cright to merge with right.  We're done.
		 */
		if (cequal)
			return 0;
	}

	/* Try to merge cright and right. */
	ulen = (unsigned long long)right.rc_blockcount + cright.rc_blockcount;
	if (xfs_refc_valid(&right) && xfs_refc_valid(&cright) &&
	    right.rc_refcount == cright.rc_refcount + adjust &&
	    ulen < MAXREFCEXTLEN) {
		*shape_changed = true;
		return xfs_refcount_merge_right_extent(cur, &right, &cright,
				aglen);
	}

	return error;
}
/*
 * Decide whether the current transaction still has enough log
 * reservation to continue updating refcount records, or whether the
 * caller should stop and roll the transaction.
 *
 * XXX: This is a pretty hand-wavy estimate.  The penalty for guessing
 * true incorrectly is a shutdown FS; the penalty for guessing false
 * incorrectly is more transaction rolls than might be necessary.
 * Be conservative here.
 */
static bool
xfs_refcount_still_have_space(
	struct xfs_btree_cur		*cur)
{
	unsigned long			overhead;

	/* Estimate log space already consumed by btree shape changes. */
	overhead = cur->bc_private.a.priv.refc.shape_changes *
			xfs_allocfree_log_count(cur->bc_mp, 1);
	overhead *= cur->bc_mp->m_sb.sb_blocksize;

	/*
	 * Only allow 2 refcount extent updates per transaction if the
	 * refcount continue update "error" has been injected.
	 */
	if (cur->bc_private.a.priv.refc.nr_ops > 2 &&
	    XFS_TEST_ERROR(false, cur->bc_mp,
			XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE))
		return false;

	/* Nothing done yet: always room for at least one update. */
	if (cur->bc_private.a.priv.refc.nr_ops == 0)
		return true;
	else if (overhead > cur->bc_tp->t_log_res)
		return false;
	/*
	 * Room remains only if the reservation left after overhead still
	 * covers the per-operation cost of the updates done so far.
	 */
	return  cur->bc_tp->t_log_res - overhead >
		cur->bc_private.a.priv.refc.nr_ops * XFS_REFCOUNT_ITEM_OVERHEAD;
}
  742. /*
  743. * Adjust the refcounts of middle extents. At this point we should have
  744. * split extents that crossed the adjustment range; merged with adjacent
  745. * extents; and updated agbno/aglen to reflect the merges. Therefore,
  746. * all we have to do is update the extents inside [agbno, agbno + aglen].
  747. */
STATIC int
xfs_refcount_adjust_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		*agbno,	/* in/out: advanced past adjusted blocks */
	xfs_extlen_t		*aglen,	/* in/out: shrinks as extents are processed */
	enum xfs_refc_adjust_op	adj,
	struct xfs_defer_ops	*dfops,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_refcount_irec	ext, tmp;
	int				error;
	int				found_rec, found_tmp;
	xfs_fsblock_t			fsbno;

	/* Merging did all the work already. */
	if (*aglen == 0)
		return 0;

	/* Position the cursor at the first record at or after agbno. */
	error = xfs_refcount_lookup_ge(cur, *agbno, &found_rec);
	if (error)
		goto out_error;
	/*
	 * Walk forward through the records, stopping early if the
	 * transaction is running low on log reservation.
	 */
	while (*aglen > 0 && xfs_refcount_still_have_space(cur)) {
		error = xfs_refcount_get_rec(cur, &ext, &found_rec);
		if (error)
			goto out_error;
		if (!found_rec) {
			/*
			 * Ran off the end of the tree; fabricate a record
			 * starting past the end of the AG so that the hole
			 * handling below covers the remaining range.
			 */
			ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
			ext.rc_blockcount = 0;
			ext.rc_refcount = 0;
		}
		/*
		 * Deal with a hole in the refcount tree; if a file maps to
		 * these blocks and there's no refcountbt record, pretend that
		 * there is one with refcount == 1.
		 */
		if (ext.rc_startblock != *agbno) {
			tmp.rc_startblock = *agbno;
			tmp.rc_blockcount = min(*aglen,
					ext.rc_startblock - *agbno);
			/* Implicit refcount 1 plus the adjustment. */
			tmp.rc_refcount = 1 + adj;
			trace_xfs_refcount_modify_extent(cur->bc_mp,
					cur->bc_private.a.agno, &tmp);
			/*
			 * Either cover the hole (increment) or
			 * delete the range (decrement).
			 */
			if (tmp.rc_refcount) {
				error = xfs_refcount_insert(cur, &tmp,
						&found_tmp);
				if (error)
					goto out_error;
				XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
						found_tmp == 1, out_error);
				cur->bc_private.a.priv.refc.nr_ops++;
			} else {
				/* Refcount hit zero: free the blocks. */
				fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
						cur->bc_private.a.agno,
						tmp.rc_startblock);
				xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
						tmp.rc_blockcount, oinfo);
			}
			/* Advance past the hole and re-seek the cursor. */
			(*agbno) += tmp.rc_blockcount;
			(*aglen) -= tmp.rc_blockcount;
			error = xfs_refcount_lookup_ge(cur, *agbno,
					&found_rec);
			if (error)
				goto out_error;
		}
		/* Stop if there's nothing left to modify */
		if (*aglen == 0 || !xfs_refcount_still_have_space(cur))
			break;
		/*
		 * Adjust the reference count and either update the tree
		 * (incr) or free the blocks (decr).
		 */
		if (ext.rc_refcount == MAXREFCOUNT)
			goto skip;	/* pinned at max; leave it alone */
		ext.rc_refcount += adj;
		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_private.a.agno, &ext);
		if (ext.rc_refcount > 1) {
			error = xfs_refcount_update(cur, &ext);
			if (error)
				goto out_error;
			cur->bc_private.a.priv.refc.nr_ops++;
		} else if (ext.rc_refcount == 1) {
			/*
			 * Refcount dropped to 1; the tree does not store
			 * refcount == 1 records, so delete this one.  The
			 * delete already moved the cursor, so skip the
			 * increment below.
			 */
			error = xfs_refcount_delete(cur, &found_rec);
			if (error)
				goto out_error;
			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
					found_rec == 1, out_error);
			cur->bc_private.a.priv.refc.nr_ops++;
			goto advloop;
		} else {
			/* Refcount dropped to zero; free the extent. */
			fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
					cur->bc_private.a.agno,
					ext.rc_startblock);
			xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
					ext.rc_blockcount, oinfo);
		}
skip:
		error = xfs_btree_increment(cur, 0, &found_rec);
		if (error)
			goto out_error;
advloop:
		(*agbno) += ext.rc_blockcount;
		(*aglen) -= ext.rc_blockcount;
	}

	return error;
out_error:
	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}
  860. /* Adjust the reference count of a range of AG blocks. */
  861. STATIC int
  862. xfs_refcount_adjust(
  863. struct xfs_btree_cur *cur,
  864. xfs_agblock_t agbno,
  865. xfs_extlen_t aglen,
  866. xfs_agblock_t *new_agbno,
  867. xfs_extlen_t *new_aglen,
  868. enum xfs_refc_adjust_op adj,
  869. struct xfs_defer_ops *dfops,
  870. struct xfs_owner_info *oinfo)
  871. {
  872. bool shape_changed;
  873. int shape_changes = 0;
  874. int error;
  875. *new_agbno = agbno;
  876. *new_aglen = aglen;
  877. if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
  878. trace_xfs_refcount_increase(cur->bc_mp, cur->bc_private.a.agno,
  879. agbno, aglen);
  880. else
  881. trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_private.a.agno,
  882. agbno, aglen);
  883. /*
  884. * Ensure that no rcextents cross the boundary of the adjustment range.
  885. */
  886. error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
  887. if (error)
  888. goto out_error;
  889. if (shape_changed)
  890. shape_changes++;
  891. error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
  892. if (error)
  893. goto out_error;
  894. if (shape_changed)
  895. shape_changes++;
  896. /*
  897. * Try to merge with the left or right extents of the range.
  898. */
  899. error = xfs_refcount_merge_extents(cur, new_agbno, new_aglen, adj,
  900. XFS_FIND_RCEXT_SHARED, &shape_changed);
  901. if (error)
  902. goto out_error;
  903. if (shape_changed)
  904. shape_changes++;
  905. if (shape_changes)
  906. cur->bc_private.a.priv.refc.shape_changes++;
  907. /* Now that we've taken care of the ends, adjust the middle extents */
  908. error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen,
  909. adj, dfops, oinfo);
  910. if (error)
  911. goto out_error;
  912. return 0;
  913. out_error:
  914. trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_private.a.agno,
  915. error, _RET_IP_);
  916. return error;
  917. }
  918. /* Clean up after calling xfs_refcount_finish_one. */
  919. void
  920. xfs_refcount_finish_one_cleanup(
  921. struct xfs_trans *tp,
  922. struct xfs_btree_cur *rcur,
  923. int error)
  924. {
  925. struct xfs_buf *agbp;
  926. if (rcur == NULL)
  927. return;
  928. agbp = rcur->bc_private.a.agbp;
  929. xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
  930. if (error)
  931. xfs_trans_brelse(tp, agbp);
  932. }
  933. /*
  934. * Process one of the deferred refcount operations. We pass back the
  935. * btree cursor to maintain our lock on the btree between calls.
  936. * This saves time and eliminates a buffer deadlock between the
  937. * superblock and the AGF because we'll always grab them in the same
  938. * order.
  939. */
int
xfs_refcount_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dfops,
	enum xfs_refcount_intent_type	type,
	xfs_fsblock_t			startblock,
	xfs_extlen_t			blockcount,
	xfs_fsblock_t			*new_fsb,	/* out: first unprocessed block */
	xfs_extlen_t			*new_len,	/* out: remaining length, 0 if done */
	struct xfs_btree_cur		**pcur)		/* in/out: cached btree cursor */
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_btree_cur		*rcur;
	struct xfs_buf			*agbp = NULL;
	int				error = 0;
	xfs_agnumber_t			agno;
	xfs_agblock_t			bno;
	xfs_agblock_t			new_agbno;
	unsigned long			nr_ops = 0;
	int				shape_changes = 0;

	agno = XFS_FSB_TO_AGNO(mp, startblock);
	ASSERT(agno != NULLAGNUMBER);
	bno = XFS_FSB_TO_AGBNO(mp, startblock);

	trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, startblock),
			type, XFS_FSB_TO_AGBNO(mp, startblock),
			blockcount);

	/* Error-injection hook for testing intent recovery. */
	if (XFS_TEST_ERROR(false, mp,
			XFS_ERRTAG_REFCOUNT_FINISH_ONE))
		return -EIO;

	/*
	 * If we haven't gotten a cursor or the cursor AG doesn't match
	 * the startblock, get one now.
	 */
	rcur = *pcur;
	if (rcur != NULL && rcur->bc_private.a.agno != agno) {
		/* Carry the op counters over to the replacement cursor. */
		nr_ops = rcur->bc_private.a.priv.refc.nr_ops;
		shape_changes = rcur->bc_private.a.priv.refc.shape_changes;
		xfs_refcount_finish_one_cleanup(tp, rcur, 0);
		rcur = NULL;
		*pcur = NULL;
	}
	if (rcur == NULL) {
		error = xfs_alloc_read_agf(tp->t_mountp, tp, agno,
				XFS_ALLOC_FLAG_FREEING, &agbp);
		if (error)
			return error;
		if (!agbp)
			return -EFSCORRUPTED;

		rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, dfops);
		if (!rcur) {
			error = -ENOMEM;
			goto out_cur;
		}
		rcur->bc_private.a.priv.refc.nr_ops = nr_ops;
		rcur->bc_private.a.priv.refc.shape_changes = shape_changes;
	}
	*pcur = rcur;

	/* Dispatch on the intent type; each case sets *new_fsb/*new_len. */
	switch (type) {
	case XFS_REFCOUNT_INCREASE:
		error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
			new_len, XFS_REFCOUNT_ADJUST_INCREASE, dfops, NULL);
		*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
		break;
	case XFS_REFCOUNT_DECREASE:
		error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
			new_len, XFS_REFCOUNT_ADJUST_DECREASE, dfops, NULL);
		*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
		break;
	case XFS_REFCOUNT_ALLOC_COW:
		/* CoW ops always finish in one shot; nothing left over. */
		*new_fsb = startblock + blockcount;
		*new_len = 0;
		error = __xfs_refcount_cow_alloc(rcur, bno, blockcount, dfops);
		break;
	case XFS_REFCOUNT_FREE_COW:
		*new_fsb = startblock + blockcount;
		*new_len = 0;
		error = __xfs_refcount_cow_free(rcur, bno, blockcount, dfops);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}
	/* A nonzero *new_len means the caller must requeue the remainder. */
	if (!error && *new_len > 0)
		trace_xfs_refcount_finish_one_leftover(mp, agno, type,
				bno, blockcount, new_agbno, *new_len);
	return error;

out_cur:
	xfs_trans_brelse(tp, agbp);
	return error;
}
  1030. /*
  1031. * Record a refcount intent for later processing.
  1032. */
  1033. static int
  1034. __xfs_refcount_add(
  1035. struct xfs_mount *mp,
  1036. struct xfs_defer_ops *dfops,
  1037. enum xfs_refcount_intent_type type,
  1038. xfs_fsblock_t startblock,
  1039. xfs_extlen_t blockcount)
  1040. {
  1041. struct xfs_refcount_intent *ri;
  1042. trace_xfs_refcount_defer(mp, XFS_FSB_TO_AGNO(mp, startblock),
  1043. type, XFS_FSB_TO_AGBNO(mp, startblock),
  1044. blockcount);
  1045. ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
  1046. KM_SLEEP | KM_NOFS);
  1047. INIT_LIST_HEAD(&ri->ri_list);
  1048. ri->ri_type = type;
  1049. ri->ri_startblock = startblock;
  1050. ri->ri_blockcount = blockcount;
  1051. xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
  1052. return 0;
  1053. }
  1054. /*
  1055. * Increase the reference count of the blocks backing a file's extent.
  1056. */
  1057. int
  1058. xfs_refcount_increase_extent(
  1059. struct xfs_mount *mp,
  1060. struct xfs_defer_ops *dfops,
  1061. struct xfs_bmbt_irec *PREV)
  1062. {
  1063. if (!xfs_sb_version_hasreflink(&mp->m_sb))
  1064. return 0;
  1065. return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_INCREASE,
  1066. PREV->br_startblock, PREV->br_blockcount);
  1067. }
  1068. /*
  1069. * Decrease the reference count of the blocks backing a file's extent.
  1070. */
  1071. int
  1072. xfs_refcount_decrease_extent(
  1073. struct xfs_mount *mp,
  1074. struct xfs_defer_ops *dfops,
  1075. struct xfs_bmbt_irec *PREV)
  1076. {
  1077. if (!xfs_sb_version_hasreflink(&mp->m_sb))
  1078. return 0;
  1079. return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_DECREASE,
  1080. PREV->br_startblock, PREV->br_blockcount);
  1081. }
  1082. /*
  1083. * Given an AG extent, find the lowest-numbered run of shared blocks
  1084. * within that range and return the range in fbno/flen. If
  1085. * find_end_of_shared is set, return the longest contiguous extent of
  1086. * shared blocks; if not, just return the first extent we find. If no
  1087. * shared blocks are found, fbno and flen will be set to NULLAGBLOCK
  1088. * and 0, respectively.
  1089. */
int
xfs_refcount_find_shared(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*fbno,	/* out: first shared block, or NULLAGBLOCK */
	xfs_extlen_t		*flen,	/* out: length of shared run, or 0 */
	bool			find_end_of_shared)
{
	struct xfs_refcount_irec	tmp;
	int				i;
	int				have;
	int				error;

	trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_private.a.agno,
			agbno, aglen);

	/* By default, skip the whole range */
	*fbno = NULLAGBLOCK;
	*flen = 0;

	/* Try to find a refcount extent that crosses the start */
	error = xfs_refcount_lookup_le(cur, agbno, &have);
	if (error)
		goto out_error;
	if (!have) {
		/* No left extent, look at the next one */
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			goto done;
	}
	error = xfs_refcount_get_rec(cur, &tmp, &i);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);

	/* If the extent ends before the start, look at the next one */
	if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) {
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			goto done;
		error = xfs_refcount_get_rec(cur, &tmp, &i);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
	}

	/* If the extent starts after the range we want, bail out */
	if (tmp.rc_startblock >= agbno + aglen)
		goto done;

	/* We found the start of a shared extent! */
	if (tmp.rc_startblock < agbno) {
		/* Trim the record so it starts at agbno. */
		tmp.rc_blockcount -= (agbno - tmp.rc_startblock);
		tmp.rc_startblock = agbno;
	}

	*fbno = tmp.rc_startblock;
	/* Clamp the result to the end of the query range. */
	*flen = min(tmp.rc_blockcount, agbno + aglen - *fbno);
	if (!find_end_of_shared)
		goto done;

	/* Otherwise, find the end of this shared extent */
	while (*fbno + *flen < agbno + aglen) {
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			break;
		error = xfs_refcount_get_rec(cur, &tmp, &i);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
		/* Stop at a gap or once we pass the end of the range. */
		if (tmp.rc_startblock >= agbno + aglen ||
		    tmp.rc_startblock != *fbno + *flen)
			break;
		*flen = min(*flen + tmp.rc_blockcount, agbno + aglen - *fbno);
	}

done:
	trace_xfs_refcount_find_shared_result(cur->bc_mp,
			cur->bc_private.a.agno, *fbno, *flen);

out_error:
	if (error)
		trace_xfs_refcount_find_shared_error(cur->bc_mp,
				cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}
  1173. /*
  1174. * Recovering CoW Blocks After a Crash
  1175. *
  1176. * Due to the way that the copy on write mechanism works, there's a window of
  1177. * opportunity in which we can lose track of allocated blocks during a crash.
  1178. * Because CoW uses delayed allocation in the in-core CoW fork, writeback
  1179. * causes blocks to be allocated and stored in the CoW fork. The blocks are
  1180. * no longer in the free space btree but are not otherwise recorded anywhere
  1181. * until the write completes and the blocks are mapped into the file. A crash
  1182. * in between allocation and remapping results in the replacement blocks being
  1183. * lost. This situation is exacerbated by the CoW extent size hint because
  1184. * allocations can hang around for long time.
  1185. *
  1186. * However, there is a place where we can record these allocations before they
  1187. * become mappings -- the reference count btree. The btree does not record
  1188. * extents with refcount == 1, so we can record allocations with a refcount of
  1189. * 1. Blocks being used for CoW writeout cannot be shared, so there should be
  1190. * no conflict with shared block records. These mappings should be created
  1191. * when we allocate blocks to the CoW fork and deleted when they're removed
  1192. * from the CoW fork.
  1193. *
  1194. * Minor nit: records for in-progress CoW allocations and records for shared
  1195. * extents must never be merged, to preserve the property that (except for CoW
  1196. * allocations) there are no refcount btree entries with refcount == 1. The
  1197. * only time this could potentially happen is when unsharing a block that's
  1198. * adjacent to CoW allocations, so we must be careful to avoid this.
  1199. *
  1200. * At mount time we recover lost CoW allocations by searching the refcount
  1201. * btree for these refcount == 1 mappings. These represent CoW allocations
  1202. * that were in progress at the time the filesystem went down, so we can free
  1203. * them to get the space back.
  1204. *
  1205. * This mechanism is superior to creating EFIs for unmapped CoW extents for
  1206. * several reasons -- first, EFIs pin the tail of the log and would have to be
  1207. * periodically relogged to avoid filling up the log. Second, CoW completions
  1208. * will have to file an EFD and create new EFIs for whatever remains in the
  1209. * CoW fork; this partially takes care of (1) but extent-size reservations
  1210. * will have to periodically relog even if there's no writeout in progress.
  1211. * This can happen if the CoW extent size hint is set, which you really want.
  1212. * Third, EFIs cannot currently be automatically relogged into newer
  1213. * transactions to advance the log tail. Fourth, stuffing the log full of
  1214. * EFIs places an upper bound on the number of CoW allocations that can be
  1215. * held filesystem-wide at any given time. Recording them in the refcount
  1216. * btree doesn't require us to maintain any state in memory and doesn't pin
  1217. * the log.
  1218. */
  1219. /*
  1220. * Adjust the refcounts of CoW allocations. These allocations are "magic"
  1221. * in that they're not referenced anywhere else in the filesystem, so we
  1222. * stash them in the refcount btree with a refcount of 1 until either file
  1223. * remapping (or CoW cancellation) happens.
  1224. */
STATIC int
xfs_refcount_adjust_cow_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,	/* already offset by XFS_REFC_COW_START */
	xfs_extlen_t		aglen,
	enum xfs_refc_adjust_op	adj)
{
	struct xfs_refcount_irec	ext, tmp;
	int				error;
	int				found_rec, found_tmp;

	if (aglen == 0)
		return 0;

	/* Find any overlapping refcount records */
	error = xfs_refcount_lookup_ge(cur, agbno, &found_rec);
	if (error)
		goto out_error;
	error = xfs_refcount_get_rec(cur, &ext, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec) {
		/*
		 * No record found; fabricate one past the end of the CoW
		 * staging area so the range checks below behave sanely.
		 */
		ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks +
				XFS_REFC_COW_START;
		ext.rc_blockcount = 0;
		ext.rc_refcount = 0;
	}

	switch (adj) {
	case XFS_REFCOUNT_ADJUST_COW_ALLOC:
		/* Adding a CoW reservation, there should be nothing here. */
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				ext.rc_startblock >= agbno + aglen, out_error);

		/* Stage the reservation as a single refcount == 1 record. */
		tmp.rc_startblock = agbno;
		tmp.rc_blockcount = aglen;
		tmp.rc_refcount = 1;
		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_private.a.agno, &tmp);

		error = xfs_refcount_insert(cur, &tmp,
				&found_tmp);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				found_tmp == 1, out_error);
		break;
	case XFS_REFCOUNT_ADJUST_COW_FREE:
		/* Removing a CoW reservation, there should be one extent. */
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				ext.rc_startblock == agbno, out_error);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				ext.rc_blockcount == aglen, out_error);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				ext.rc_refcount == 1, out_error);

		ext.rc_refcount = 0;
		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_private.a.agno, &ext);
		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				found_rec == 1, out_error);
		break;
	default:
		ASSERT(0);
	}

	return error;
out_error:
	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}
  1293. /*
  1294. * Add or remove refcount btree entries for CoW reservations.
  1295. */
  1296. STATIC int
  1297. xfs_refcount_adjust_cow(
  1298. struct xfs_btree_cur *cur,
  1299. xfs_agblock_t agbno,
  1300. xfs_extlen_t aglen,
  1301. enum xfs_refc_adjust_op adj)
  1302. {
  1303. bool shape_changed;
  1304. int error;
  1305. agbno += XFS_REFC_COW_START;
  1306. /*
  1307. * Ensure that no rcextents cross the boundary of the adjustment range.
  1308. */
  1309. error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
  1310. if (error)
  1311. goto out_error;
  1312. error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
  1313. if (error)
  1314. goto out_error;
  1315. /*
  1316. * Try to merge with the left or right extents of the range.
  1317. */
  1318. error = xfs_refcount_merge_extents(cur, &agbno, &aglen, adj,
  1319. XFS_FIND_RCEXT_COW, &shape_changed);
  1320. if (error)
  1321. goto out_error;
  1322. /* Now that we've taken care of the ends, adjust the middle extents */
  1323. error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj);
  1324. if (error)
  1325. goto out_error;
  1326. return 0;
  1327. out_error:
  1328. trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_private.a.agno,
  1329. error, _RET_IP_);
  1330. return error;
  1331. }
  1332. /*
  1333. * Record a CoW allocation in the refcount btree.
  1334. */
  1335. STATIC int
  1336. __xfs_refcount_cow_alloc(
  1337. struct xfs_btree_cur *rcur,
  1338. xfs_agblock_t agbno,
  1339. xfs_extlen_t aglen,
  1340. struct xfs_defer_ops *dfops)
  1341. {
  1342. trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
  1343. agbno, aglen);
  1344. /* Add refcount btree reservation */
  1345. return xfs_refcount_adjust_cow(rcur, agbno, aglen,
  1346. XFS_REFCOUNT_ADJUST_COW_ALLOC);
  1347. }
  1348. /*
  1349. * Remove a CoW allocation from the refcount btree.
  1350. */
  1351. STATIC int
  1352. __xfs_refcount_cow_free(
  1353. struct xfs_btree_cur *rcur,
  1354. xfs_agblock_t agbno,
  1355. xfs_extlen_t aglen,
  1356. struct xfs_defer_ops *dfops)
  1357. {
  1358. trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
  1359. agbno, aglen);
  1360. /* Remove refcount btree reservation */
  1361. return xfs_refcount_adjust_cow(rcur, agbno, aglen,
  1362. XFS_REFCOUNT_ADJUST_COW_FREE);
  1363. }
  1364. /* Record a CoW staging extent in the refcount btree. */
  1365. int
  1366. xfs_refcount_alloc_cow_extent(
  1367. struct xfs_mount *mp,
  1368. struct xfs_defer_ops *dfops,
  1369. xfs_fsblock_t fsb,
  1370. xfs_extlen_t len)
  1371. {
  1372. int error;
  1373. if (!xfs_sb_version_hasreflink(&mp->m_sb))
  1374. return 0;
  1375. error = __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
  1376. fsb, len);
  1377. if (error)
  1378. return error;
  1379. /* Add rmap entry */
  1380. return xfs_rmap_alloc_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
  1381. XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
  1382. }
  1383. /* Forget a CoW staging event in the refcount btree. */
  1384. int
  1385. xfs_refcount_free_cow_extent(
  1386. struct xfs_mount *mp,
  1387. struct xfs_defer_ops *dfops,
  1388. xfs_fsblock_t fsb,
  1389. xfs_extlen_t len)
  1390. {
  1391. int error;
  1392. if (!xfs_sb_version_hasreflink(&mp->m_sb))
  1393. return 0;
  1394. /* Remove rmap entry */
  1395. error = xfs_rmap_free_extent(mp, dfops, XFS_FSB_TO_AGNO(mp, fsb),
  1396. XFS_FSB_TO_AGBNO(mp, fsb), len, XFS_RMAP_OWN_COW);
  1397. if (error)
  1398. return error;
  1399. return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
  1400. fsb, len);
  1401. }
/* In-memory note of one leftover CoW staging extent awaiting cleanup. */
struct xfs_refcount_recovery {
	struct list_head		rr_list;	/* link in the debris list */
	struct xfs_refcount_irec	rr_rrec;	/* the leftover refcount record */
};
  1406. /* Stuff an extent on the recovery list. */
  1407. STATIC int
  1408. xfs_refcount_recover_extent(
  1409. struct xfs_btree_cur *cur,
  1410. union xfs_btree_rec *rec,
  1411. void *priv)
  1412. {
  1413. struct list_head *debris = priv;
  1414. struct xfs_refcount_recovery *rr;
  1415. if (be32_to_cpu(rec->refc.rc_refcount) != 1)
  1416. return -EFSCORRUPTED;
  1417. rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), KM_SLEEP);
  1418. xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
  1419. list_add_tail(&rr->rr_list, debris);
  1420. return 0;
  1421. }
  1422. /* Find and remove leftover CoW reservations. */
int
xfs_refcount_recover_cow_leftovers(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno)
{
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agbp;
	struct xfs_refcount_recovery	*rr, *n;
	struct list_head		debris;
	union xfs_btree_irec		low;
	union xfs_btree_irec		high;
	struct xfs_defer_ops		dfops;
	xfs_fsblock_t			fsb;
	xfs_agblock_t			agbno;
	int				error;

	/*
	 * If the AG is so large that regular block numbers reach the CoW
	 * staging offset, we cannot distinguish staging records.
	 */
	if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
		return -EOPNOTSUPP;

	INIT_LIST_HEAD(&debris);

	/*
	 * In this first part, we use an empty transaction to gather up
	 * all the leftover CoW extents so that we can subsequently
	 * delete them.  The empty transaction is used to avoid
	 * a buffer lock deadlock if there happens to be a loop in the
	 * refcountbt because we're allowed to re-grab a buffer that is
	 * already attached to our transaction.  When we're done
	 * recording the CoW debris we cancel the (empty) transaction
	 * and everything goes away cleanly.
	 */
	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_alloc_read_agf(mp, tp, agno, 0, &agbp);
	if (error)
		goto out_trans;
	if (!agbp) {
		error = -ENOMEM;
		goto out_trans;
	}
	cur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, NULL);

	/* Find all the leftover CoW staging extents. */
	memset(&low, 0, sizeof(low));
	memset(&high, 0, sizeof(high));
	/* Staging records occupy [XFS_REFC_COW_START, end of keyspace]. */
	low.rc.rc_startblock = XFS_REFC_COW_START;
	high.rc.rc_startblock = -1U;
	error = xfs_btree_query_range(cur, &low, &high,
			xfs_refcount_recover_extent, &debris);
	if (error)
		goto out_cursor;
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_trans_brelse(tp, agbp);
	xfs_trans_cancel(tp);

	/* Now iterate the list to free the leftovers */
	list_for_each_entry_safe(rr, n, &debris, rr_list) {
		/* Set up transaction. */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
		if (error)
			goto out_free;

		trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);

		/* Free the orphan record */
		xfs_defer_init(&dfops, &fsb);
		/* Undo the CoW staging offset to get the real AG block. */
		agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
		fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
		error = xfs_refcount_free_cow_extent(mp, &dfops, fsb,
				rr->rr_rrec.rc_blockcount);
		if (error)
			goto out_defer;

		/* Free the block. */
		xfs_bmap_add_free(mp, &dfops, fsb,
				rr->rr_rrec.rc_blockcount, NULL);

		error = xfs_defer_finish(&tp, &dfops);
		if (error)
			goto out_defer;

		error = xfs_trans_commit(tp);
		if (error)
			goto out_free;

		list_del(&rr->rr_list);
		kmem_free(rr);
	}

	return error;
out_defer:
	xfs_defer_cancel(&dfops);
out_trans:
	xfs_trans_cancel(tp);
out_free:
	/* Free the leftover list */
	list_for_each_entry_safe(rr, n, &debris, rr_list) {
		list_del(&rr->rr_list);
		kmem_free(rr);
	}
	return error;

out_cursor:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	xfs_trans_brelse(tp, agbp);
	goto out_trans;
}
  1519. /* Is there a record covering a given extent? */
  1520. int
  1521. xfs_refcount_has_record(
  1522. struct xfs_btree_cur *cur,
  1523. xfs_agblock_t bno,
  1524. xfs_extlen_t len,
  1525. bool *exists)
  1526. {
  1527. union xfs_btree_irec low;
  1528. union xfs_btree_irec high;
  1529. memset(&low, 0, sizeof(low));
  1530. low.rc.rc_startblock = bno;
  1531. memset(&high, 0xFF, sizeof(high));
  1532. high.rc.rc_startblock = bno + len - 1;
  1533. return xfs_btree_has_record(cur, &low, &high, exists);
  1534. }