xfs_refcount.c

/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_refcount.h"
#include "xfs_rmap.h"

/* Allowable refcount adjustment amounts. */
enum xfs_refc_adjust_op {
	XFS_REFCOUNT_ADJUST_INCREASE	= 1,
	XFS_REFCOUNT_ADJUST_DECREASE	= -1,
	XFS_REFCOUNT_ADJUST_COW_ALLOC	= 0,
	XFS_REFCOUNT_ADJUST_COW_FREE	= -1,
};

STATIC int __xfs_refcount_cow_alloc(struct xfs_btree_cur *rcur,
		xfs_agblock_t agbno, xfs_extlen_t aglen,
		struct xfs_defer_ops *dfops);
STATIC int __xfs_refcount_cow_free(struct xfs_btree_cur *rcur,
		xfs_agblock_t agbno, xfs_extlen_t aglen,
		struct xfs_defer_ops *dfops);

/*
 * Look up the first record less than or equal to [bno, len] in the btree
 * given by cur.
 */
int
xfs_refcount_lookup_le(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	int			*stat)
{
	trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
			XFS_LOOKUP_LE);
	cur->bc_rec.rc.rc_startblock = bno;
	cur->bc_rec.rc.rc_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
}

/*
 * Look up the first record greater than or equal to [bno, len] in the btree
 * given by cur.
 */
int
xfs_refcount_lookup_ge(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		bno,
	int			*stat)
{
	trace_xfs_refcount_lookup(cur->bc_mp, cur->bc_private.a.agno, bno,
			XFS_LOOKUP_GE);
	cur->bc_rec.rc.rc_startblock = bno;
	cur->bc_rec.rc.rc_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/* Convert on-disk record to in-core format. */
static inline void
xfs_refcount_btrec_to_irec(
	union xfs_btree_rec		*rec,
	struct xfs_refcount_irec	*irec)
{
	irec->rc_startblock = be32_to_cpu(rec->refc.rc_startblock);
	irec->rc_blockcount = be32_to_cpu(rec->refc.rc_blockcount);
	irec->rc_refcount = be32_to_cpu(rec->refc.rc_refcount);
}

/*
 * Get the data from the pointed-to record.
 */
int
xfs_refcount_get_rec(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*irec,
	int				*stat)
{
	union xfs_btree_rec	*rec;
	int			error;

	error = xfs_btree_get_rec(cur, &rec, stat);
	if (!error && *stat == 1) {
		xfs_refcount_btrec_to_irec(rec, irec);
		trace_xfs_refcount_get(cur->bc_mp, cur->bc_private.a.agno,
				irec);
	}
	return error;
}

/*
 * Update the record referred to by cur to the value given
 * by [bno, len, refcount].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_refcount_update(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*irec)
{
	union xfs_btree_rec	rec;
	int			error;

	trace_xfs_refcount_update(cur->bc_mp, cur->bc_private.a.agno, irec);
	rec.refc.rc_startblock = cpu_to_be32(irec->rc_startblock);
	rec.refc.rc_blockcount = cpu_to_be32(irec->rc_blockcount);
	rec.refc.rc_refcount = cpu_to_be32(irec->rc_refcount);
	error = xfs_btree_update(cur, &rec);
	if (error)
		trace_xfs_refcount_update_error(cur->bc_mp,
				cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/*
 * Insert the record referred to by cur to the value given
 * by [bno, len, refcount].
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_refcount_insert(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*irec,
	int				*i)
{
	int				error;

	trace_xfs_refcount_insert(cur->bc_mp, cur->bc_private.a.agno, irec);
	cur->bc_rec.rc.rc_startblock = irec->rc_startblock;
	cur->bc_rec.rc.rc_blockcount = irec->rc_blockcount;
	cur->bc_rec.rc.rc_refcount = irec->rc_refcount;
	error = xfs_btree_insert(cur, i);
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
out_error:
	if (error)
		trace_xfs_refcount_insert_error(cur->bc_mp,
				cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/*
 * Remove the record referred to by cur, then set the pointer to the spot
 * where the record could be re-inserted, in case we want to increment or
 * decrement the cursor.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_refcount_delete(
	struct xfs_btree_cur	*cur,
	int			*i)
{
	struct xfs_refcount_irec	irec;
	int				found_rec;
	int				error;

	error = xfs_refcount_get_rec(cur, &irec, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
	trace_xfs_refcount_delete(cur->bc_mp, cur->bc_private.a.agno, &irec);
	error = xfs_btree_delete(cur, i);
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, *i == 1, out_error);
	if (error)
		goto out_error;
	error = xfs_refcount_lookup_ge(cur, irec.rc_startblock, &found_rec);
out_error:
	if (error)
		trace_xfs_refcount_delete_error(cur->bc_mp,
				cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/*
 * Adjusting the Reference Count
 *
 * As stated elsewhere, the reference count btree (refcbt) stores
 * >1 reference counts for extents of physical blocks.  In this
 * operation, we're either raising or lowering the reference count of
 * some subrange stored in the tree:
 *
 *            <------ adjustment range ------>
 * ----+   +---+-----+ +--+--------+---------
 *  2  |   | 3 |  4  | |17|   55   |   10
 * ----+   +---+-----+ +--+--------+---------
 * X axis is physical blocks number;
 * reference counts are the numbers inside the rectangles
 *
 * The first thing we need to do is to ensure that there are no
 * refcount extents crossing either boundary of the range to be
 * adjusted.  For any extent that does cross a boundary, split it into
 * two extents so that we can increment the refcount of one of the
 * pieces later:
 *
 *            <------ adjustment range ------>
 * ----+   +---+-----+ +--+--------+----+----
 *  2  |   | 3 |  2  | |17|   55   | 10 | 10
 * ----+   +---+-----+ +--+--------+----+----
 *
 * For this next step, let's assume that all the physical blocks in
 * the adjustment range are mapped to a file and are therefore in use
 * at least once.  Therefore, we can infer that any gap in the
 * refcount tree within the adjustment range represents a physical
 * extent with refcount == 1:
 *
 *            <------ adjustment range ------>
 * ----+---+---+-----+-+--+--------+----+----
 *  2  |"1"| 3 |  2  |1|17|   55   | 10 | 10
 * ----+---+---+-----+-+--+--------+----+----
 *       ^
 *
 * For each extent that falls within the interval range, figure out
 * which extent is to the left or the right of that extent.  Now we
 * have a left, current, and right extent.  If the new reference count
 * of the center extent enables us to merge left, center, and right
 * into one record covering all three, do so.  If the center extent is
 * at the left end of the range, abuts the left extent, and its new
 * reference count matches the left extent's record, then merge them.
 * If the center extent is at the right end of the range, abuts the
 * right extent, and the reference counts match, merge those.  In the
 * example, we can left merge (assuming an increment operation):
 *
 *            <------ adjustment range ------>
 * --------+---+-----+-+--+--------+----+----
 *    2    | 3 |  2  |1|17|   55   | 10 | 10
 * --------+---+-----+-+--+--------+----+----
 *          ^
 *
 * For all other extents within the range, adjust the reference count
 * or delete it if the refcount falls below 2.  If we were
 * incrementing, the end result looks like this:
 *
 *            <------ adjustment range ------>
 * --------+---+-----+-+--+--------+----+----
 *    2    | 4 |  3  |2|18|   56   | 11 | 10
 * --------+---+-----+-+--+--------+----+----
 *
 * The result of a decrement operation looks as such:
 *
 *            <------ adjustment range ------>
 * ----+   +---+       +--+--------+----+----
 *  2  |   | 2 |       |16|   54   |  9 | 10
 * ----+   +---+       +--+--------+----+----
 *      DDDD    111111DD
 *
 * The blocks marked "D" are freed; the blocks marked "1" are only
 * referenced once and therefore the record is removed from the
 * refcount btree.
 */

/* Next block after this extent. */
static inline xfs_agblock_t
xfs_refc_next(
	struct xfs_refcount_irec	*rc)
{
	return rc->rc_startblock + rc->rc_blockcount;
}

/*
 * Split a refcount extent that crosses agbno.
 */
STATIC int
xfs_refcount_split_extent(
	struct xfs_btree_cur		*cur,
	xfs_agblock_t			agbno,
	bool				*shape_changed)
{
	struct xfs_refcount_irec	rcext, tmp;
	int				found_rec;
	int				error;

	*shape_changed = false;
	error = xfs_refcount_lookup_le(cur, agbno, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &rcext, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
	if (rcext.rc_startblock == agbno || xfs_refc_next(&rcext) <= agbno)
		return 0;

	*shape_changed = true;
	trace_xfs_refcount_split_extent(cur->bc_mp, cur->bc_private.a.agno,
			&rcext, agbno);

	/* Establish the right extent. */
	tmp = rcext;
	tmp.rc_startblock = agbno;
	tmp.rc_blockcount -= (agbno - rcext.rc_startblock);
	error = xfs_refcount_update(cur, &tmp);
	if (error)
		goto out_error;

	/* Insert the left extent. */
	tmp = rcext;
	tmp.rc_blockcount = agbno - rcext.rc_startblock;
	error = xfs_refcount_insert(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);
	return error;

out_error:
	trace_xfs_refcount_split_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/*
 * Merge the left, center, and right extents.
 */
STATIC int
xfs_refcount_merge_center_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*center,
	struct xfs_refcount_irec	*right,
	unsigned long long		extlen,
	xfs_agblock_t			*agbno,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_center_extents(cur->bc_mp,
			cur->bc_private.a.agno, left, center, right);

	/*
	 * Make sure the center and right extents are not in the btree.
	 * If the center extent was synthesized, the first delete call
	 * removes the right extent and we skip the second deletion.
	 * If center and right were in the btree, then the first delete
	 * call removes the center and the second one removes the right
	 * extent.
	 */
	error = xfs_refcount_lookup_ge(cur, center->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	error = xfs_refcount_delete(cur, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	if (center->rc_refcount > 1) {
		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);
	}

	/* Enlarge the left extent. */
	error = xfs_refcount_lookup_le(cur, left->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	left->rc_blockcount = extlen;
	error = xfs_refcount_update(cur, left);
	if (error)
		goto out_error;

	*aglen = 0;
	return error;

out_error:
	trace_xfs_refcount_merge_center_extents_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/*
 * Merge with the left extent.
 */
STATIC int
xfs_refcount_merge_left_extent(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*cleft,
	xfs_agblock_t			*agbno,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_left_extent(cur->bc_mp,
			cur->bc_private.a.agno, left, cleft);

	/* If the extent at agbno (cleft) wasn't synthesized, remove it. */
	if (cleft->rc_refcount > 1) {
		error = xfs_refcount_lookup_le(cur, cleft->rc_startblock,
				&found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);

		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);
	}

	/* Enlarge the left extent. */
	error = xfs_refcount_lookup_le(cur, left->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	left->rc_blockcount += cleft->rc_blockcount;
	error = xfs_refcount_update(cur, left);
	if (error)
		goto out_error;

	*agbno += cleft->rc_blockcount;
	*aglen -= cleft->rc_blockcount;
	return error;

out_error:
	trace_xfs_refcount_merge_left_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/*
 * Merge with the right extent.
 */
STATIC int
xfs_refcount_merge_right_extent(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*right,
	struct xfs_refcount_irec	*cright,
	xfs_agblock_t			*agbno,
	xfs_extlen_t			*aglen)
{
	int				error;
	int				found_rec;

	trace_xfs_refcount_merge_right_extent(cur->bc_mp,
			cur->bc_private.a.agno, cright, right);

	/*
	 * If the extent ending at agbno+aglen (cright) wasn't synthesized,
	 * remove it.
	 */
	if (cright->rc_refcount > 1) {
		error = xfs_refcount_lookup_le(cur, cright->rc_startblock,
				&found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);

		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);
	}

	/* Enlarge the right extent. */
	error = xfs_refcount_lookup_le(cur, right->rc_startblock,
			&found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	right->rc_startblock -= cright->rc_blockcount;
	right->rc_blockcount += cright->rc_blockcount;
	error = xfs_refcount_update(cur, right);
	if (error)
		goto out_error;

	*aglen -= cright->rc_blockcount;
	return error;

out_error:
	trace_xfs_refcount_merge_right_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

#define XFS_FIND_RCEXT_SHARED	1
#define XFS_FIND_RCEXT_COW	2

/*
 * Find the left extent and the one after it (cleft).  This function assumes
 * that we've already split any extent crossing agbno.
 */
STATIC int
xfs_refcount_find_left_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*left,
	struct xfs_refcount_irec	*cleft,
	xfs_agblock_t			agbno,
	xfs_extlen_t			aglen,
	int				flags)
{
	struct xfs_refcount_irec	tmp;
	int				error;
	int				found_rec;

	left->rc_startblock = cleft->rc_startblock = NULLAGBLOCK;
	error = xfs_refcount_lookup_le(cur, agbno - 1, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	if (xfs_refc_next(&tmp) != agbno)
		return 0;
	if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
		return 0;
	if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
		return 0;
	/* We have a left extent; retrieve (or invent) the next right one */
	*left = tmp;

	error = xfs_btree_increment(cur, 0, &found_rec);
	if (error)
		goto out_error;
	if (found_rec) {
		error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);

		/* if tmp starts at the end of our range, just use that */
		if (tmp.rc_startblock == agbno)
			*cleft = tmp;
		else {
			/*
			 * There's a gap in the refcntbt at the start of the
			 * range we're interested in (refcount == 1) so
			 * synthesize the implied extent and pass it back.
			 * We assume here that the agbno/aglen range was
			 * passed in from a data fork extent mapping and
			 * therefore is allocated to exactly one owner.
			 */
			cleft->rc_startblock = agbno;
			cleft->rc_blockcount = min(aglen,
					tmp.rc_startblock - agbno);
			cleft->rc_refcount = 1;
		}
	} else {
		/*
		 * No extents, so pretend that there's one covering the whole
		 * range.
		 */
		cleft->rc_startblock = agbno;
		cleft->rc_blockcount = aglen;
		cleft->rc_refcount = 1;
	}
	trace_xfs_refcount_find_left_extent(cur->bc_mp, cur->bc_private.a.agno,
			left, cleft, agbno);
	return error;

out_error:
	trace_xfs_refcount_find_left_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/*
 * Find the right extent and the one before it (cright).  This function
 * assumes that we've already split any extents crossing agbno + aglen.
 */
STATIC int
xfs_refcount_find_right_extents(
	struct xfs_btree_cur		*cur,
	struct xfs_refcount_irec	*right,
	struct xfs_refcount_irec	*cright,
	xfs_agblock_t			agbno,
	xfs_extlen_t			aglen,
	int				flags)
{
	struct xfs_refcount_irec	tmp;
	int				error;
	int				found_rec;

	right->rc_startblock = cright->rc_startblock = NULLAGBLOCK;
	error = xfs_refcount_lookup_ge(cur, agbno + aglen, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec)
		return 0;

	error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1, out_error);

	if (tmp.rc_startblock != agbno + aglen)
		return 0;
	if ((flags & XFS_FIND_RCEXT_SHARED) && tmp.rc_refcount < 2)
		return 0;
	if ((flags & XFS_FIND_RCEXT_COW) && tmp.rc_refcount > 1)
		return 0;
	/* We have a right extent; retrieve (or invent) the next left one */
	*right = tmp;

	error = xfs_btree_decrement(cur, 0, &found_rec);
	if (error)
		goto out_error;
	if (found_rec) {
		error = xfs_refcount_get_rec(cur, &tmp, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, found_rec == 1,
				out_error);

		/* if tmp ends at the end of our range, just use that */
		if (xfs_refc_next(&tmp) == agbno + aglen)
			*cright = tmp;
		else {
			/*
			 * There's a gap in the refcntbt at the end of the
			 * range we're interested in (refcount == 1) so
			 * create the implied extent and pass it back.
			 * We assume here that the agbno/aglen range was
			 * passed in from a data fork extent mapping and
			 * therefore is allocated to exactly one owner.
			 */
			cright->rc_startblock = max(agbno, xfs_refc_next(&tmp));
			cright->rc_blockcount = right->rc_startblock -
					cright->rc_startblock;
			cright->rc_refcount = 1;
		}
	} else {
		/*
		 * No extents, so pretend that there's one covering the whole
		 * range.
		 */
		cright->rc_startblock = agbno;
		cright->rc_blockcount = aglen;
		cright->rc_refcount = 1;
	}
	trace_xfs_refcount_find_right_extent(cur->bc_mp, cur->bc_private.a.agno,
			cright, right, agbno + aglen);
	return error;

out_error:
	trace_xfs_refcount_find_right_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/* Is this extent valid? */
static inline bool
xfs_refc_valid(
	struct xfs_refcount_irec	*rc)
{
	return rc->rc_startblock != NULLAGBLOCK;
}

/*
 * Try to merge with any extents on the boundaries of the adjustment range.
 */
STATIC int
xfs_refcount_merge_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		*agbno,
	xfs_extlen_t		*aglen,
	enum xfs_refc_adjust_op adjust,
	int			flags,
	bool			*shape_changed)
{
	struct xfs_refcount_irec	left = {0}, cleft = {0};
	struct xfs_refcount_irec	cright = {0}, right = {0};
	int				error;
	unsigned long long		ulen;
	bool				cequal;

	*shape_changed = false;
	/*
	 * Find the extent just below agbno [left], just above agbno [cleft],
	 * just below (agbno + aglen) [cright], and just above (agbno + aglen)
	 * [right].
	 */
	error = xfs_refcount_find_left_extents(cur, &left, &cleft, *agbno,
			*aglen, flags);
	if (error)
		return error;
	error = xfs_refcount_find_right_extents(cur, &right, &cright, *agbno,
			*aglen, flags);
	if (error)
		return error;

	/* No left or right extent to merge; exit. */
	if (!xfs_refc_valid(&left) && !xfs_refc_valid(&right))
		return 0;

	cequal = (cleft.rc_startblock == cright.rc_startblock) &&
		 (cleft.rc_blockcount == cright.rc_blockcount);

	/* Try to merge left, cleft, and right.  cleft must == cright. */
	ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount +
			right.rc_blockcount;
	if (xfs_refc_valid(&left) && xfs_refc_valid(&right) &&
	    xfs_refc_valid(&cleft) && xfs_refc_valid(&cright) && cequal &&
	    left.rc_refcount == cleft.rc_refcount + adjust &&
	    right.rc_refcount == cleft.rc_refcount + adjust &&
	    ulen < MAXREFCEXTLEN) {
		*shape_changed = true;
		return xfs_refcount_merge_center_extents(cur, &left, &cleft,
				&right, ulen, agbno, aglen);
	}

	/* Try to merge left and cleft. */
	ulen = (unsigned long long)left.rc_blockcount + cleft.rc_blockcount;
	if (xfs_refc_valid(&left) && xfs_refc_valid(&cleft) &&
	    left.rc_refcount == cleft.rc_refcount + adjust &&
	    ulen < MAXREFCEXTLEN) {
		*shape_changed = true;
		error = xfs_refcount_merge_left_extent(cur, &left, &cleft,
				agbno, aglen);
		if (error)
			return error;

		/*
		 * If we just merged left + cleft and cleft == cright,
		 * we no longer have a cright to merge with right.  We're done.
		 */
		if (cequal)
			return 0;
	}

	/* Try to merge cright and right. */
	ulen = (unsigned long long)right.rc_blockcount + cright.rc_blockcount;
	if (xfs_refc_valid(&right) && xfs_refc_valid(&cright) &&
	    right.rc_refcount == cright.rc_refcount + adjust &&
	    ulen < MAXREFCEXTLEN) {
		*shape_changed = true;
		return xfs_refcount_merge_right_extent(cur, &right, &cright,
				agbno, aglen);
	}

	return error;
}

/*
 * While we're adjusting the refcounts records of an extent, we have
 * to keep an eye on the number of extents we're dirtying -- run too
 * many in a single transaction and we'll exceed the transaction's
 * reservation and crash the fs.  Each record adds 12 bytes to the
 * log (plus any key updates) so we'll conservatively assume 24 bytes
 * per record.  We must also leave space for btree splits on both ends
 * of the range and space for the CUD and a new CUI.
 *
 * XXX: This is a pretty hand-wavy estimate.  The penalty for guessing
 * true incorrectly is a shutdown FS; the penalty for guessing false
 * incorrectly is more transaction rolls than might be necessary.
 * Be conservative here.
 */
static bool
xfs_refcount_still_have_space(
	struct xfs_btree_cur		*cur)
{
	unsigned long			overhead;

	overhead = cur->bc_private.a.priv.refc.shape_changes *
			xfs_allocfree_log_count(cur->bc_mp, 1);
	overhead *= cur->bc_mp->m_sb.sb_blocksize;

	/*
	 * Only allow 2 refcount extent updates per transaction if the
	 * refcount continue update "error" has been injected.
	 */
	if (cur->bc_private.a.priv.refc.nr_ops > 2 &&
	    XFS_TEST_ERROR(false, cur->bc_mp,
			XFS_ERRTAG_REFCOUNT_CONTINUE_UPDATE,
			XFS_RANDOM_REFCOUNT_CONTINUE_UPDATE))
		return false;

	if (cur->bc_private.a.priv.refc.nr_ops == 0)
		return true;
	else if (overhead > cur->bc_tp->t_log_res)
		return false;
	return  cur->bc_tp->t_log_res - overhead >
		cur->bc_private.a.priv.refc.nr_ops * 32;
}

/*
 * Adjust the refcounts of middle extents.  At this point we should have
 * split extents that crossed the adjustment range; merged with adjacent
 * extents; and updated agbno/aglen to reflect the merges.  Therefore,
 * all we have to do is update the extents inside [agbno, agbno + aglen].
 */
STATIC int
xfs_refcount_adjust_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		*agbno,
	xfs_extlen_t		*aglen,
	enum xfs_refc_adjust_op	adj,
	struct xfs_defer_ops	*dfops,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_refcount_irec	ext, tmp;
	int				error;
	int				found_rec, found_tmp;
	xfs_fsblock_t			fsbno;

	/* Merging did all the work already. */
	if (*aglen == 0)
		return 0;

	error = xfs_refcount_lookup_ge(cur, *agbno, &found_rec);
	if (error)
		goto out_error;

	while (*aglen > 0 && xfs_refcount_still_have_space(cur)) {
		error = xfs_refcount_get_rec(cur, &ext, &found_rec);
		if (error)
			goto out_error;
		if (!found_rec) {
			ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks;
			ext.rc_blockcount = 0;
			ext.rc_refcount = 0;
		}

		/*
		 * Deal with a hole in the refcount tree; if a file maps to
		 * these blocks and there's no refcountbt record, pretend that
		 * there is one with refcount == 1.
		 */
		if (ext.rc_startblock != *agbno) {
			tmp.rc_startblock = *agbno;
			tmp.rc_blockcount = min(*aglen,
					ext.rc_startblock - *agbno);
			tmp.rc_refcount = 1 + adj;
			trace_xfs_refcount_modify_extent(cur->bc_mp,
					cur->bc_private.a.agno, &tmp);

			/*
			 * Either cover the hole (increment) or
			 * delete the range (decrement).
			 */
			if (tmp.rc_refcount) {
				error = xfs_refcount_insert(cur, &tmp,
						&found_tmp);
				if (error)
					goto out_error;
				XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
						found_tmp == 1, out_error);
				cur->bc_private.a.priv.refc.nr_ops++;
			} else {
				fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
						cur->bc_private.a.agno,
						tmp.rc_startblock);
				xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
						tmp.rc_blockcount, oinfo);
			}

			(*agbno) += tmp.rc_blockcount;
			(*aglen) -= tmp.rc_blockcount;

			error = xfs_refcount_lookup_ge(cur, *agbno,
					&found_rec);
			if (error)
				goto out_error;
		}

		/* Stop if there's nothing left to modify */
		if (*aglen == 0 || !xfs_refcount_still_have_space(cur))
			break;

		/*
		 * Adjust the reference count and either update the tree
		 * (incr) or free the blocks (decr).
		 */
		if (ext.rc_refcount == MAXREFCOUNT)
			goto skip;
		ext.rc_refcount += adj;
		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_private.a.agno, &ext);
		if (ext.rc_refcount > 1) {
			error = xfs_refcount_update(cur, &ext);
			if (error)
				goto out_error;
			cur->bc_private.a.priv.refc.nr_ops++;
		} else if (ext.rc_refcount == 1) {
			error = xfs_refcount_delete(cur, &found_rec);
			if (error)
				goto out_error;
			XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
					found_rec == 1, out_error);
			cur->bc_private.a.priv.refc.nr_ops++;
			goto advloop;
		} else {
			fsbno = XFS_AGB_TO_FSB(cur->bc_mp,
					cur->bc_private.a.agno,
					ext.rc_startblock);
			xfs_bmap_add_free(cur->bc_mp, dfops, fsbno,
					ext.rc_blockcount, oinfo);
		}

skip:
		error = xfs_btree_increment(cur, 0, &found_rec);
		if (error)
			goto out_error;

advloop:
		(*agbno) += ext.rc_blockcount;
		(*aglen) -= ext.rc_blockcount;
	}

	return error;
out_error:
	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/* Adjust the reference count of a range of AG blocks. */
STATIC int
xfs_refcount_adjust(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*new_agbno,
	xfs_extlen_t		*new_aglen,
	enum xfs_refc_adjust_op	adj,
	struct xfs_defer_ops	*dfops,
	struct xfs_owner_info	*oinfo)
{
	bool			shape_changed;
	int			shape_changes = 0;
	int			error;

	*new_agbno = agbno;
	*new_aglen = aglen;
	if (adj == XFS_REFCOUNT_ADJUST_INCREASE)
		trace_xfs_refcount_increase(cur->bc_mp, cur->bc_private.a.agno,
				agbno, aglen);
	else
		trace_xfs_refcount_decrease(cur->bc_mp, cur->bc_private.a.agno,
				agbno, aglen);

	/*
	 * Ensure that no rcextents cross the boundary of the adjustment range.
	 */
	error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
	if (error)
		goto out_error;
	if (shape_changed)
		shape_changes++;

	error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
	if (error)
		goto out_error;
	if (shape_changed)
		shape_changes++;

	/*
	 * Try to merge with the left or right extents of the range.
	 */
	error = xfs_refcount_merge_extents(cur, new_agbno, new_aglen, adj,
			XFS_FIND_RCEXT_SHARED, &shape_changed);
	if (error)
		goto out_error;
	if (shape_changed)
		shape_changes++;
	if (shape_changes)
		cur->bc_private.a.priv.refc.shape_changes++;

	/* Now that we've taken care of the ends, adjust the middle extents */
	error = xfs_refcount_adjust_extents(cur, new_agbno, new_aglen,
			adj, dfops, oinfo);
	if (error)
		goto out_error;

	return 0;

out_error:
	trace_xfs_refcount_adjust_error(cur->bc_mp, cur->bc_private.a.agno,
			error, _RET_IP_);
	return error;
}

/* Clean up after calling xfs_refcount_finish_one. */
void
xfs_refcount_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp;

	if (rcur == NULL)
		return;
	agbp = rcur->bc_private.a.agbp;
	xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	if (error)
		xfs_trans_brelse(tp, agbp);
}

/*
 * Process one of the deferred refcount operations.  We pass back the
 * btree cursor to maintain our lock on the btree between calls.
 * This saves time and eliminates a buffer deadlock between the
 * superblock and the AGF because we'll always grab them in the same
 * order.
 */
int
xfs_refcount_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dfops,
	enum xfs_refcount_intent_type	type,
	xfs_fsblock_t			startblock,
	xfs_extlen_t			blockcount,
	xfs_fsblock_t			*new_fsb,
	xfs_extlen_t			*new_len,
	struct xfs_btree_cur		**pcur)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_btree_cur		*rcur;
	struct xfs_buf			*agbp = NULL;
	int				error = 0;
	xfs_agnumber_t			agno;
	xfs_agblock_t			bno;
	xfs_agblock_t			new_agbno;
	unsigned long			nr_ops = 0;
	int				shape_changes = 0;

	agno = XFS_FSB_TO_AGNO(mp, startblock);
	ASSERT(agno != NULLAGNUMBER);
	bno = XFS_FSB_TO_AGBNO(mp, startblock);

	trace_xfs_refcount_deferred(mp, XFS_FSB_TO_AGNO(mp, startblock),
			type, XFS_FSB_TO_AGBNO(mp, startblock),
			blockcount);

	if (XFS_TEST_ERROR(false, mp,
			XFS_ERRTAG_REFCOUNT_FINISH_ONE,
			XFS_RANDOM_REFCOUNT_FINISH_ONE))
		return -EIO;

	/*
	 * If we haven't gotten a cursor or the cursor AG doesn't match
	 * the startblock, get one now.
	 */
	rcur = *pcur;
	if (rcur != NULL && rcur->bc_private.a.agno != agno) {
		nr_ops = rcur->bc_private.a.priv.refc.nr_ops;
		shape_changes = rcur->bc_private.a.priv.refc.shape_changes;
		xfs_refcount_finish_one_cleanup(tp, rcur, 0);
		rcur = NULL;
		*pcur = NULL;
	}
	if (rcur == NULL) {
		error = xfs_alloc_read_agf(tp->t_mountp, tp, agno,
				XFS_ALLOC_FLAG_FREEING, &agbp);
		if (error)
			return error;
		if (!agbp)
			return -EFSCORRUPTED;

		rcur = xfs_refcountbt_init_cursor(mp, tp, agbp, agno, dfops);
		if (!rcur) {
			error = -ENOMEM;
			goto out_cur;
		}
		rcur->bc_private.a.priv.refc.nr_ops = nr_ops;
		rcur->bc_private.a.priv.refc.shape_changes = shape_changes;
	}
	*pcur = rcur;

	switch (type) {
	case XFS_REFCOUNT_INCREASE:
		error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
			new_len, XFS_REFCOUNT_ADJUST_INCREASE, dfops, NULL);
		*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
		break;
	case XFS_REFCOUNT_DECREASE:
		error = xfs_refcount_adjust(rcur, bno, blockcount, &new_agbno,
			new_len, XFS_REFCOUNT_ADJUST_DECREASE, dfops, NULL);
		*new_fsb = XFS_AGB_TO_FSB(mp, agno, new_agbno);
		break;
	case XFS_REFCOUNT_ALLOC_COW:
		*new_fsb = startblock + blockcount;
		*new_len = 0;
		error = __xfs_refcount_cow_alloc(rcur, bno, blockcount, dfops);
		break;
	case XFS_REFCOUNT_FREE_COW:
		*new_fsb = startblock + blockcount;
		*new_len = 0;
		error = __xfs_refcount_cow_free(rcur, bno, blockcount, dfops);
		break;
	default:
		ASSERT(0);
		error = -EFSCORRUPTED;
	}
	if (!error && *new_len > 0)
		trace_xfs_refcount_finish_one_leftover(mp, agno, type,
				bno, blockcount, new_agbno, *new_len);
	return error;

out_cur:
	xfs_trans_brelse(tp, agbp);

	return error;
}

/*
 * Record a refcount intent for later processing.
 */
static int
__xfs_refcount_add(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	enum xfs_refcount_intent_type	type,
	xfs_fsblock_t			startblock,
	xfs_extlen_t			blockcount)
{
	struct xfs_refcount_intent	*ri;

	trace_xfs_refcount_defer(mp, XFS_FSB_TO_AGNO(mp, startblock),
			type, XFS_FSB_TO_AGBNO(mp, startblock),
			blockcount);

	ri = kmem_alloc(sizeof(struct xfs_refcount_intent),
			KM_SLEEP | KM_NOFS);
	INIT_LIST_HEAD(&ri->ri_list);
	ri->ri_type = type;
	ri->ri_startblock = startblock;
	ri->ri_blockcount = blockcount;

	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_REFCOUNT, &ri->ri_list);
	return 0;
}

/*
 * Increase the reference count of the blocks backing a file's extent.
 */
int
xfs_refcount_increase_extent(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	struct xfs_bmbt_irec		*PREV)
{
	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_INCREASE,
			PREV->br_startblock, PREV->br_blockcount);
}

/*
 * Decrease the reference count of the blocks backing a file's extent.
 */
int
xfs_refcount_decrease_extent(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	struct xfs_bmbt_irec		*PREV)
{
	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_DECREASE,
			PREV->br_startblock, PREV->br_blockcount);
}

/*
 * Given an AG extent, find the lowest-numbered run of shared blocks
 * within that range and return the range in fbno/flen.  If
 * find_end_of_shared is set, return the longest contiguous extent of
 * shared blocks; if not, just return the first extent we find.  If no
 * shared blocks are found, fbno and flen will be set to NULLAGBLOCK
 * and 0, respectively.
 */
int
xfs_refcount_find_shared(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*fbno,
	xfs_extlen_t		*flen,
	bool			find_end_of_shared)
{
	struct xfs_refcount_irec	tmp;
	int				i;
	int				have;
	int				error;

	trace_xfs_refcount_find_shared(cur->bc_mp, cur->bc_private.a.agno,
			agbno, aglen);

	/* By default, skip the whole range */
	*fbno = NULLAGBLOCK;
	*flen = 0;

	/* Try to find a refcount extent that crosses the start */
	error = xfs_refcount_lookup_le(cur, agbno, &have);
	if (error)
		goto out_error;
	if (!have) {
		/* No left extent, look at the next one */
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			goto done;
	}
	error = xfs_refcount_get_rec(cur, &tmp, &i);
	if (error)
		goto out_error;
	XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);

	/* If the extent ends before the start, look at the next one */
	if (tmp.rc_startblock + tmp.rc_blockcount <= agbno) {
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			goto done;
		error = xfs_refcount_get_rec(cur, &tmp, &i);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
	}

	/* If the extent starts after the range we want, bail out */
	if (tmp.rc_startblock >= agbno + aglen)
		goto done;

	/* We found the start of a shared extent! */
	if (tmp.rc_startblock < agbno) {
		tmp.rc_blockcount -= (agbno - tmp.rc_startblock);
		tmp.rc_startblock = agbno;
	}

	*fbno = tmp.rc_startblock;
	*flen = min(tmp.rc_blockcount, agbno + aglen - *fbno);
	if (!find_end_of_shared)
		goto done;

	/* Otherwise, find the end of this shared extent */
	while (*fbno + *flen < agbno + aglen) {
		error = xfs_btree_increment(cur, 0, &have);
		if (error)
			goto out_error;
		if (!have)
			break;
		error = xfs_refcount_get_rec(cur, &tmp, &i);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, out_error);
		if (tmp.rc_startblock >= agbno + aglen ||
		    tmp.rc_startblock != *fbno + *flen)
			break;
		*flen = min(*flen + tmp.rc_blockcount, agbno + aglen - *fbno);
	}

done:
	trace_xfs_refcount_find_shared_result(cur->bc_mp,
			cur->bc_private.a.agno, *fbno, *flen);

out_error:
	if (error)
		trace_xfs_refcount_find_shared_error(cur->bc_mp,
				cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/*
 * Recovering CoW Blocks After a Crash
 *
 * Due to the way that the copy on write mechanism works, there's a window of
 * opportunity in which we can lose track of allocated blocks during a crash.
 * Because CoW uses delayed allocation in the in-core CoW fork, writeback
 * causes blocks to be allocated and stored in the CoW fork.  The blocks are
 * no longer in the free space btree but are not otherwise recorded anywhere
 * until the write completes and the blocks are mapped into the file.  A crash
 * in between allocation and remapping results in the replacement blocks being
 * lost.  This situation is exacerbated by the CoW extent size hint because
 * allocations can hang around for long time.
 *
 * However, there is a place where we can record these allocations before they
 * become mappings -- the reference count btree.  The btree does not record
 * extents with refcount == 1, so we can record allocations with a refcount of
 * 1.  Blocks being used for CoW writeout cannot be shared, so there should be
 * no conflict with shared block records.  These mappings should be created
 * when we allocate blocks to the CoW fork and deleted when they're removed
 * from the CoW fork.
 *
 * Minor nit: records for in-progress CoW allocations and records for shared
 * extents must never be merged, to preserve the property that (except for CoW
 * allocations) there are no refcount btree entries with refcount == 1.  The
 * only time this could potentially happen is when unsharing a block that's
 * adjacent to CoW allocations, so we must be careful to avoid this.
 *
 * At mount time we recover lost CoW allocations by searching the refcount
 * btree for these refcount == 1 mappings.  These represent CoW allocations
 * that were in progress at the time the filesystem went down, so we can free
 * them to get the space back.
 *
 * This mechanism is superior to creating EFIs for unmapped CoW extents for
 * several reasons -- first, EFIs pin the tail of the log and would have to be
 * periodically relogged to avoid filling up the log.  Second, CoW completions
 * will have to file an EFD and create new EFIs for whatever remains in the
 * CoW fork; this partially takes care of (1) but extent-size reservations
 * will have to periodically relog even if there's no writeout in progress.
 * This can happen if the CoW extent size hint is set, which you really want.
 * Third, EFIs cannot currently be automatically relogged into newer
 * transactions to advance the log tail.  Fourth, stuffing the log full of
 * EFIs places an upper bound on the number of CoW allocations that can be
 * held filesystem-wide at any given time.  Recording them in the refcount
 * btree doesn't require us to maintain any state in memory and doesn't pin
 * the log.
 */

/*
 * Adjust the refcounts of CoW allocations.  These allocations are "magic"
 * in that they're not referenced anywhere else in the filesystem, so we
 * stash them in the refcount btree with a refcount of 1 until either file
 * remapping (or CoW cancellation) happens.
 */
STATIC int
xfs_refcount_adjust_cow_extents(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	enum xfs_refc_adjust_op	adj,
	struct xfs_defer_ops	*dfops,
	struct xfs_owner_info	*oinfo)
{
	struct xfs_refcount_irec	ext, tmp;
	int				error;
	int				found_rec, found_tmp;

	if (aglen == 0)
		return 0;

	/* Find any overlapping refcount records */
	error = xfs_refcount_lookup_ge(cur, agbno, &found_rec);
	if (error)
		goto out_error;
	error = xfs_refcount_get_rec(cur, &ext, &found_rec);
	if (error)
		goto out_error;
	if (!found_rec) {
		ext.rc_startblock = cur->bc_mp->m_sb.sb_agblocks +
				XFS_REFC_COW_START;
		ext.rc_blockcount = 0;
		ext.rc_refcount = 0;
	}

	switch (adj) {
	case XFS_REFCOUNT_ADJUST_COW_ALLOC:
		/* Adding a CoW reservation, there should be nothing here. */
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				ext.rc_startblock >= agbno + aglen, out_error);

		tmp.rc_startblock = agbno;
		tmp.rc_blockcount = aglen;
		tmp.rc_refcount = 1;
		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_private.a.agno, &tmp);

		error = xfs_refcount_insert(cur, &tmp,
				&found_tmp);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				found_tmp == 1, out_error);
		break;
	case XFS_REFCOUNT_ADJUST_COW_FREE:
		/* Removing a CoW reservation, there should be one extent. */
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				ext.rc_startblock == agbno, out_error);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				ext.rc_blockcount == aglen, out_error);
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				ext.rc_refcount == 1, out_error);

		ext.rc_refcount = 0;
		trace_xfs_refcount_modify_extent(cur->bc_mp,
				cur->bc_private.a.agno, &ext);
		error = xfs_refcount_delete(cur, &found_rec);
		if (error)
			goto out_error;
		XFS_WANT_CORRUPTED_GOTO(cur->bc_mp,
				found_rec == 1, out_error);
		break;
	default:
		ASSERT(0);
	}

	return error;
out_error:
	trace_xfs_refcount_modify_extent_error(cur->bc_mp,
			cur->bc_private.a.agno, error, _RET_IP_);
	return error;
}

/*
 * Add or remove refcount btree entries for CoW reservations.
 */
STATIC int
xfs_refcount_adjust_cow(
	struct xfs_btree_cur	*cur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	enum xfs_refc_adjust_op	adj,
	struct xfs_defer_ops	*dfops)
{
	bool			shape_changed;
	int			error;

	agbno += XFS_REFC_COW_START;

	/*
	 * Ensure that no rcextents cross the boundary of the adjustment range.
	 */
	error = xfs_refcount_split_extent(cur, agbno, &shape_changed);
	if (error)
		goto out_error;

	error = xfs_refcount_split_extent(cur, agbno + aglen, &shape_changed);
	if (error)
		goto out_error;

	/*
	 * Try to merge with the left or right extents of the range.
	 */
	error = xfs_refcount_merge_extents(cur, &agbno, &aglen, adj,
			XFS_FIND_RCEXT_COW, &shape_changed);
	if (error)
		goto out_error;

	/* Now that we've taken care of the ends, adjust the middle extents */
	error = xfs_refcount_adjust_cow_extents(cur, agbno, aglen, adj,
			dfops, NULL);
	if (error)
		goto out_error;

	return 0;

out_error:
	trace_xfs_refcount_adjust_cow_error(cur->bc_mp, cur->bc_private.a.agno,
			error, _RET_IP_);
	return error;
}

/*
 * Record a CoW allocation in the refcount btree.
 */
STATIC int
__xfs_refcount_cow_alloc(
	struct xfs_btree_cur	*rcur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	struct xfs_defer_ops	*dfops)
{
	int			error;

	trace_xfs_refcount_cow_increase(rcur->bc_mp, rcur->bc_private.a.agno,
			agbno, aglen);

	/* Add refcount btree reservation */
	error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
			XFS_REFCOUNT_ADJUST_COW_ALLOC, dfops);
	if (error)
		return error;

	/* Add rmap entry */
	if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
		error = xfs_rmap_alloc_extent(rcur->bc_mp, dfops,
				rcur->bc_private.a.agno,
				agbno, aglen, XFS_RMAP_OWN_COW);
		if (error)
			return error;
	}

	return error;
}

/*
 * Remove a CoW allocation from the refcount btree.
 */
STATIC int
__xfs_refcount_cow_free(
	struct xfs_btree_cur	*rcur,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	struct xfs_defer_ops	*dfops)
{
	int			error;

	trace_xfs_refcount_cow_decrease(rcur->bc_mp, rcur->bc_private.a.agno,
			agbno, aglen);

	/* Remove refcount btree reservation */
	error = xfs_refcount_adjust_cow(rcur, agbno, aglen,
			XFS_REFCOUNT_ADJUST_COW_FREE, dfops);
	if (error)
		return error;

	/* Remove rmap entry */
	if (xfs_sb_version_hasrmapbt(&rcur->bc_mp->m_sb)) {
		error = xfs_rmap_free_extent(rcur->bc_mp, dfops,
				rcur->bc_private.a.agno,
				agbno, aglen, XFS_RMAP_OWN_COW);
		if (error)
			return error;
	}

	return error;
}

/* Record a CoW staging extent in the refcount btree. */
int
xfs_refcount_alloc_cow_extent(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	xfs_fsblock_t			fsb,
	xfs_extlen_t			len)
{
	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_ALLOC_COW,
			fsb, len);
}

/* Forget a CoW staging event in the refcount btree. */
int
xfs_refcount_free_cow_extent(
	struct xfs_mount		*mp,
	struct xfs_defer_ops		*dfops,
	xfs_fsblock_t			fsb,
	xfs_extlen_t			len)
{
	if (!xfs_sb_version_hasreflink(&mp->m_sb))
		return 0;

	return __xfs_refcount_add(mp, dfops, XFS_REFCOUNT_FREE_COW,
			fsb, len);
}

struct xfs_refcount_recovery {
	struct list_head		rr_list;
	struct xfs_refcount_irec	rr_rrec;
};

/* Stuff an extent on the recovery list. */
STATIC int
xfs_refcount_recover_extent(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec,
	void			*priv)
{
	struct list_head		*debris = priv;
	struct xfs_refcount_recovery	*rr;

	if (be32_to_cpu(rec->refc.rc_refcount) != 1)
		return -EFSCORRUPTED;

	rr = kmem_alloc(sizeof(struct xfs_refcount_recovery), KM_SLEEP);
	xfs_refcount_btrec_to_irec(rec, &rr->rr_rrec);
	list_add_tail(&rr->rr_list, debris);

	return 0;
}

/* Find and remove leftover CoW reservations. */
int
xfs_refcount_recover_cow_leftovers(
	struct xfs_mount		*mp,
	xfs_agnumber_t			agno)
{
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*cur;
	struct xfs_buf			*agbp;
	struct xfs_refcount_recovery	*rr, *n;
	struct list_head		debris;
	union xfs_btree_irec		low;
	union xfs_btree_irec		high;
	struct xfs_defer_ops		dfops;
	xfs_fsblock_t			fsb;
	xfs_agblock_t			agbno;
	int				error;

	if (mp->m_sb.sb_agblocks >= XFS_REFC_COW_START)
		return -EOPNOTSUPP;

	error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
	if (error)
		return error;
	cur = xfs_refcountbt_init_cursor(mp, NULL, agbp, agno, NULL);

	/* Find all the leftover CoW staging extents. */
	INIT_LIST_HEAD(&debris);
	memset(&low, 0, sizeof(low));
	memset(&high, 0, sizeof(high));
	low.rc.rc_startblock = XFS_REFC_COW_START;
	high.rc.rc_startblock = -1U;
	error = xfs_btree_query_range(cur, &low, &high,
			xfs_refcount_recover_extent, &debris);
	if (error)
		goto out_cursor;
	xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	xfs_buf_relse(agbp);

	/* Now iterate the list to free the leftovers */
	list_for_each_entry(rr, &debris, rr_list) {
		/* Set up transaction. */
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
		if (error)
			goto out_free;

		trace_xfs_refcount_recover_extent(mp, agno, &rr->rr_rrec);

		/* Free the orphan record */
		xfs_defer_init(&dfops, &fsb);
		agbno = rr->rr_rrec.rc_startblock - XFS_REFC_COW_START;
		fsb = XFS_AGB_TO_FSB(mp, agno, agbno);
		error = xfs_refcount_free_cow_extent(mp, &dfops, fsb,
				rr->rr_rrec.rc_blockcount);
		if (error)
			goto out_defer;

		/* Free the block. */
		xfs_bmap_add_free(mp, &dfops, fsb,
				rr->rr_rrec.rc_blockcount, NULL);

		error = xfs_defer_finish(&tp, &dfops, NULL);
		if (error)
			goto out_defer;

		error = xfs_trans_commit(tp);
		if (error)
			goto out_free;
	}

out_free:
	/* Free the leftover list */
	list_for_each_entry_safe(rr, n, &debris, rr_list) {
		list_del(&rr->rr_list);
		kmem_free(rr);
	}
	return error;

out_cursor:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	xfs_buf_relse(agbp);
	goto out_free;

out_defer:
	xfs_defer_cancel(&dfops);
	xfs_trans_cancel(tp);
	goto out_free;
}