xfs_refcount_btree.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_bmap.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
#include "xfs_bit.h"
#include "xfs_rmap.h"

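/* Duplicate an existing refcount btree cursor for the same AG. */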
static struct xfs_btree_cur *
xfs_refcountbt_dup_cursor(
        struct xfs_btree_cur    *cur)
{
        return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
                        cur->bc_private.a.agbp, cur->bc_private.a.agno,
                        cur->bc_private.a.dfops);
}

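/*
 * Point the AGF at a new refcount btree root block, adjust the recorded
 * tree height, and log the changed AGF fields.
 */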
STATIC void
xfs_refcountbt_set_root(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr,
        int                     inc)
{
        struct xfs_buf          *agbp = cur->bc_private.a.agbp;
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
        xfs_agnumber_t          seqno = be32_to_cpu(agf->agf_seqno);
        struct xfs_perag        *pag = xfs_perag_get(cur->bc_mp, seqno);

        ASSERT(ptr->s != 0);

        agf->agf_refcount_root = ptr->s;
        be32_add_cpu(&agf->agf_refcount_level, inc);
        pag->pagf_refcount_level += inc;
        xfs_perag_put(pag);

        xfs_alloc_log_agf(cur->bc_tp, agbp,
                        XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}

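/*
 * Allocate a new refcount btree block from the AG metadata reservation,
 * preferring space near xfs_refc_block(), and account for it in the AGF.
 */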
STATIC int
xfs_refcountbt_alloc_block(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *start,
        union xfs_btree_ptr     *new,
        int                     *stat)
{
        struct xfs_buf          *agbp = cur->bc_private.a.agbp;
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
        struct xfs_alloc_arg    args;           /* block allocation args */
        int                     error;          /* error return value */

        memset(&args, 0, sizeof(args));
        args.tp = cur->bc_tp;
        args.mp = cur->bc_mp;
        args.type = XFS_ALLOCTYPE_NEAR_BNO;
        args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
                        xfs_refc_block(args.mp));
        args.firstblock = args.fsbno;
        xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
        args.minlen = args.maxlen = args.prod = 1;
        args.resv = XFS_AG_RESV_METADATA;

        error = xfs_alloc_vextent(&args);
        if (error)
                goto out_error;
        trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
                        args.agbno, 1);
        if (args.fsbno == NULLFSBLOCK) {
                *stat = 0;
                return 0;
        }
        ASSERT(args.agno == cur->bc_private.a.agno);
        ASSERT(args.len == 1);

        new->s = cpu_to_be32(args.agbno);
        be32_add_cpu(&agf->agf_refcount_blocks, 1);
        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

        *stat = 1;
        return 0;

out_error:
        return error;
}

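/* Free a refcount btree block back to the AG metadata reservation. */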
STATIC int
xfs_refcountbt_free_block(
        struct xfs_btree_cur    *cur,
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = cur->bc_mp;
        struct xfs_buf          *agbp = cur->bc_private.a.agbp;
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
        xfs_fsblock_t           fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
        struct xfs_owner_info   oinfo;

        trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
                        XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
        xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
        be32_add_cpu(&agf->agf_refcount_blocks, -1);
        xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

        return xfs_free_extent(cur->bc_tp, fsbno, 1, &oinfo,
                        XFS_AG_RESV_METADATA);
}

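/* Return the minimum number of records per block for the given level. */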
STATIC int
xfs_refcountbt_get_minrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_refc_mnr[level != 0];
}

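/* Return the maximum number of records per block for the given level. */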
STATIC int
xfs_refcountbt_get_maxrecs(
        struct xfs_btree_cur    *cur,
        int                     level)
{
        return cur->bc_mp->m_refc_mxr[level != 0];
}

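/* Initialize a low key from a record's starting block. */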
STATIC void
xfs_refcountbt_init_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        key->refc.rc_startblock = rec->refc.rc_startblock;
}

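/* Initialize a high key from the last block covered by a record. */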
STATIC void
xfs_refcountbt_init_high_key_from_rec(
        union xfs_btree_key     *key,
        union xfs_btree_rec     *rec)
{
        __u32                   x;

        x = be32_to_cpu(rec->refc.rc_startblock);
        x += be32_to_cpu(rec->refc.rc_blockcount) - 1;
        key->refc.rc_startblock = cpu_to_be32(x);
}

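/* Copy the cursor's in-core record into an on-disk record. */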
STATIC void
xfs_refcountbt_init_rec_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *rec)
{
        rec->refc.rc_startblock = cpu_to_be32(cur->bc_rec.rc.rc_startblock);
        rec->refc.rc_blockcount = cpu_to_be32(cur->bc_rec.rc.rc_blockcount);
        rec->refc.rc_refcount = cpu_to_be32(cur->bc_rec.rc.rc_refcount);
}

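/* Fetch the refcount btree root pointer recorded in the AGF. */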
STATIC void
xfs_refcountbt_init_ptr_from_cur(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr)
{
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

        ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));

        ptr->s = agf->agf_refcount_root;
}

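/*
 * Return the signed difference between the given key's start block and the
 * start block of the cursor's in-core record.
 */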
STATIC int64_t
xfs_refcountbt_key_diff(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *key)
{
        struct xfs_refcount_irec        *rec = &cur->bc_rec.rc;
        struct xfs_refcount_key         *kp = &key->refc;

        return (int64_t)be32_to_cpu(kp->rc_startblock) - rec->rc_startblock;
}

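/* Return the signed difference between the start blocks of two keys. */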
STATIC int64_t
xfs_refcountbt_diff_two_keys(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
                        be32_to_cpu(k2->refc.rc_startblock);
}

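/*
 * Verify the structure of a refcount btree block: magic number, reflink
 * feature bit, v5 short-form header, tree level, and the generic
 * short-form block checks.
 */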
STATIC xfs_failaddr_t
xfs_refcountbt_verify(
        struct xfs_buf          *bp)
{
        struct xfs_mount        *mp = bp->b_target->bt_mount;
        struct xfs_btree_block  *block = XFS_BUF_TO_BLOCK(bp);
        struct xfs_perag        *pag = bp->b_pag;
        xfs_failaddr_t          fa;
        unsigned int            level;

        if (block->bb_magic != cpu_to_be32(XFS_REFC_CRC_MAGIC))
                return __this_address;

        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return __this_address;
        fa = xfs_btree_sblock_v5hdr_verify(bp);
        if (fa)
                return fa;

        level = be16_to_cpu(block->bb_level);
        if (pag && pag->pagf_init) {
                if (level >= pag->pagf_refcount_level)
                        return __this_address;
        } else if (level >= mp->m_refc_maxlevels)
                return __this_address;

        return xfs_btree_sblock_verify(bp, mp->m_refc_mxr[level != 0]);
}

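/* Check the CRC and structure of a refcount btree block after reading it. */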
STATIC void
xfs_refcountbt_read_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        if (!xfs_btree_sblock_verify_crc(bp))
                xfs_verifier_error(bp, -EFSBADCRC, __this_address);
        else {
                fa = xfs_refcountbt_verify(bp);
                if (fa)
                        xfs_verifier_error(bp, -EFSCORRUPTED, fa);
        }

        if (bp->b_error)
                trace_xfs_btree_corrupt(bp, _RET_IP_);
}

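/* Verify a refcount btree block and recompute its CRC before it is written. */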
STATIC void
xfs_refcountbt_write_verify(
        struct xfs_buf  *bp)
{
        xfs_failaddr_t  fa;

        fa = xfs_refcountbt_verify(bp);
        if (fa) {
                trace_xfs_btree_corrupt(bp, _RET_IP_);
                xfs_verifier_error(bp, -EFSCORRUPTED, fa);
                return;
        }
        xfs_btree_sblock_calc_crc(bp);
}

const struct xfs_buf_ops xfs_refcountbt_buf_ops = {
        .name                   = "xfs_refcountbt",
        .verify_read            = xfs_refcountbt_read_verify,
        .verify_write           = xfs_refcountbt_write_verify,
        .verify_struct          = xfs_refcountbt_verify,
};

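/* Check that two keys are in strictly increasing startblock order. */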
STATIC int
xfs_refcountbt_keys_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_key     *k1,
        union xfs_btree_key     *k2)
{
        return be32_to_cpu(k1->refc.rc_startblock) <
               be32_to_cpu(k2->refc.rc_startblock);
}

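/* Check that two records are in ascending order and do not overlap. */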
STATIC int
xfs_refcountbt_recs_inorder(
        struct xfs_btree_cur    *cur,
        union xfs_btree_rec     *r1,
        union xfs_btree_rec     *r2)
{
        return  be32_to_cpu(r1->refc.rc_startblock) +
                be32_to_cpu(r1->refc.rc_blockcount) <=
                be32_to_cpu(r2->refc.rc_startblock);
}

static const struct xfs_btree_ops xfs_refcountbt_ops = {
        .rec_len                = sizeof(struct xfs_refcount_rec),
        .key_len                = sizeof(struct xfs_refcount_key),

        .dup_cursor             = xfs_refcountbt_dup_cursor,
        .set_root               = xfs_refcountbt_set_root,
        .alloc_block            = xfs_refcountbt_alloc_block,
        .free_block             = xfs_refcountbt_free_block,
        .get_minrecs            = xfs_refcountbt_get_minrecs,
        .get_maxrecs            = xfs_refcountbt_get_maxrecs,
        .init_key_from_rec      = xfs_refcountbt_init_key_from_rec,
        .init_high_key_from_rec = xfs_refcountbt_init_high_key_from_rec,
        .init_rec_from_cur      = xfs_refcountbt_init_rec_from_cur,
        .init_ptr_from_cur      = xfs_refcountbt_init_ptr_from_cur,
        .key_diff               = xfs_refcountbt_key_diff,
        .buf_ops                = &xfs_refcountbt_buf_ops,
        .diff_two_keys          = xfs_refcountbt_diff_two_keys,
        .keys_inorder           = xfs_refcountbt_keys_inorder,
        .recs_inorder           = xfs_refcountbt_recs_inorder,
};

/*
 * Allocate a new refcount btree cursor.
 */
struct xfs_btree_cur *
xfs_refcountbt_init_cursor(
        struct xfs_mount        *mp,
        struct xfs_trans        *tp,
        struct xfs_buf          *agbp,
        xfs_agnumber_t          agno,
        struct xfs_defer_ops    *dfops)
{
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
        struct xfs_btree_cur    *cur;

        ASSERT(agno != NULLAGNUMBER);
        ASSERT(agno < mp->m_sb.sb_agcount);
        cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

        cur->bc_tp = tp;
        cur->bc_mp = mp;
        cur->bc_btnum = XFS_BTNUM_REFC;
        cur->bc_blocklog = mp->m_sb.sb_blocklog;
        cur->bc_ops = &xfs_refcountbt_ops;
        cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);

        cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);

        cur->bc_private.a.agbp = agbp;
        cur->bc_private.a.agno = agno;
        cur->bc_private.a.dfops = dfops;
        cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

        cur->bc_private.a.priv.refc.nr_ops = 0;
        cur->bc_private.a.priv.refc.shape_changes = 0;

        return cur;
}

/*
 * Calculate the number of records in a refcount btree block.
 */
int
xfs_refcountbt_maxrecs(
        int                     blocklen,
        bool                    leaf)
{
        blocklen -= XFS_REFCOUNT_BLOCK_LEN;

        if (leaf)
                return blocklen / sizeof(struct xfs_refcount_rec);
        return blocklen / (sizeof(struct xfs_refcount_key) +
                           sizeof(xfs_refcount_ptr_t));
}

/* Compute the maximum height of a refcount btree. */
void
xfs_refcountbt_compute_maxlevels(
        struct xfs_mount        *mp)
{
        mp->m_refc_maxlevels = xfs_btree_compute_maxlevels(
                        mp->m_refc_mnr, mp->m_sb.sb_agblocks);
}

/* Calculate the refcount btree size for some records. */
xfs_extlen_t
xfs_refcountbt_calc_size(
        struct xfs_mount        *mp,
        unsigned long long      len)
{
        return xfs_btree_calc_size(mp->m_refc_mnr, len);
}

/*
 * Calculate the maximum refcount btree size.
 */
xfs_extlen_t
xfs_refcountbt_max_size(
        struct xfs_mount        *mp,
        xfs_agblock_t           agblocks)
{
        /* Bail out if we're uninitialized, which can happen in mkfs. */
        if (mp->m_refc_mxr[0] == 0)
                return 0;

        return xfs_refcountbt_calc_size(mp, agblocks);
}

/*
 * Figure out how many blocks to reserve and how many are used by this btree.
 */
int
xfs_refcountbt_calc_reserves(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_extlen_t            *ask,
        xfs_extlen_t            *used)
{
        struct xfs_buf          *agbp;
        struct xfs_agf          *agf;
        xfs_agblock_t           agblocks;
        xfs_extlen_t            tree_len;
        int                     error;

        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return 0;

        error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
        if (error)
                return error;

        agf = XFS_BUF_TO_AGF(agbp);
        agblocks = be32_to_cpu(agf->agf_length);
        tree_len = be32_to_cpu(agf->agf_refcount_blocks);
        xfs_buf_relse(agbp);

        *ask += xfs_refcountbt_max_size(mp, agblocks);
        *used += tree_len;

        return error;
}