xfs_bmap_btree.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
  4. * All Rights Reserved.
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_log_format.h"
  11. #include "xfs_trans_resv.h"
  12. #include "xfs_bit.h"
  13. #include "xfs_mount.h"
  14. #include "xfs_defer.h"
  15. #include "xfs_inode.h"
  16. #include "xfs_trans.h"
  17. #include "xfs_inode_item.h"
  18. #include "xfs_alloc.h"
  19. #include "xfs_btree.h"
  20. #include "xfs_bmap_btree.h"
  21. #include "xfs_bmap.h"
  22. #include "xfs_error.h"
  23. #include "xfs_quota.h"
  24. #include "xfs_trace.h"
  25. #include "xfs_cksum.h"
  26. #include "xfs_rmap.h"
  27. /*
  28. * Convert on-disk form of btree root to in-memory form.
  29. */
/*
 * Convert the on-disk (bmdr, inode-fork-root) form of a btree root to the
 * in-memory (bmbt) form.
 *
 * @ip:        inode owning the btree root
 * @dblock:    source on-disk root block
 * @dblocklen: size of the on-disk root, in bytes
 * @rblock:    destination in-memory root block
 * @rblocklen: size of the in-memory root, in bytes
 */
void
xfs_bmdr_to_bmbt(
	struct xfs_inode	*ip,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen,
	struct xfs_btree_block	*rblock,
	int			rblocklen)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			dmxr;
	xfs_bmbt_key_t		*fkp;	/* "from" keys (on-disk) */
	__be64			*fpp;	/* "from" pointers (on-disk) */
	xfs_bmbt_key_t		*tkp;	/* "to" keys (in-memory) */
	__be64			*tpp;	/* "to" pointers (in-memory) */

	xfs_btree_init_block_int(mp, rblock, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	rblock->bb_level = dblock->bb_level;
	/* A root in bmdr form is always a node, never a leaf. */
	ASSERT(be16_to_cpu(rblock->bb_level) > 0);
	rblock->bb_numrecs = dblock->bb_numrecs;

	/* dmxr first holds the on-disk capacity, used to locate the ptrs... */
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	tkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	fpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	tpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	/* ...then is reused as the number of records actually copied. */
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
/*
 * Decode an on-disk (big-endian, bit-packed) bmbt record into the unpacked
 * in-memory irec form.
 *
 * On-disk layout, as established by the masks/shifts below:
 *   l0 bit  63:                      extent state (1 = unwritten)
 *   l0 bits 62..9:                   br_startoff
 *   l0 bits 8..0 ++ l1 bits 63..21:  br_startblock (9 + 43 bits)
 *   l1 bits 20..0:                   br_blockcount
 */
void
xfs_bmbt_disk_get_all(
	struct xfs_bmbt_rec	*rec,
	struct xfs_bmbt_irec	*irec)
{
	/* Unaligned loads: the record may live at any offset in a block. */
	uint64_t		l0 = get_unaligned_be64(&rec->l0);
	uint64_t		l1 = get_unaligned_be64(&rec->l1);

	irec->br_startoff = (l0 & xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
	irec->br_startblock = ((l0 & xfs_mask64lo(9)) << 43) | (l1 >> 21);
	irec->br_blockcount = l1 & xfs_mask64lo(21);
	if (l0 >> (64 - BMBT_EXNTFLAG_BITLEN))
		irec->br_state = XFS_EXT_UNWRITTEN;
	else
		irec->br_state = XFS_EXT_NORM;
}
  74. /*
  75. * Extract the blockcount field from an on disk bmap extent record.
  76. */
  77. xfs_filblks_t
  78. xfs_bmbt_disk_get_blockcount(
  79. xfs_bmbt_rec_t *r)
  80. {
  81. return (xfs_filblks_t)(be64_to_cpu(r->l1) & xfs_mask64lo(21));
  82. }
  83. /*
  84. * Extract the startoff field from a disk format bmap extent record.
  85. */
  86. xfs_fileoff_t
  87. xfs_bmbt_disk_get_startoff(
  88. xfs_bmbt_rec_t *r)
  89. {
  90. return ((xfs_fileoff_t)be64_to_cpu(r->l0) &
  91. xfs_mask64lo(64 - BMBT_EXNTFLAG_BITLEN)) >> 9;
  92. }
  93. /*
  94. * Set all the fields in a bmap extent record from the uncompressed form.
  95. */
/*
 * Set all the fields in a bmap extent record from the uncompressed form.
 * Inverse of xfs_bmbt_disk_get_all(): packs startoff, startblock,
 * blockcount and the unwritten flag into two big-endian 64-bit words.
 */
void
xfs_bmbt_disk_set_all(
	struct xfs_bmbt_rec	*r,
	struct xfs_bmbt_irec	*s)
{
	/* Bit 63 of l0: set when the extent is unwritten. */
	int			extent_flag = (s->br_state != XFS_EXT_NORM);

	ASSERT(s->br_state == XFS_EXT_NORM || s->br_state == XFS_EXT_UNWRITTEN);
	/* Each field must fit within its on-disk bit width. */
	ASSERT(!(s->br_startoff & xfs_mask64hi(64-BMBT_STARTOFF_BITLEN)));
	ASSERT(!(s->br_blockcount & xfs_mask64hi(64-BMBT_BLOCKCOUNT_BITLEN)));
	ASSERT(!(s->br_startblock & xfs_mask64hi(64-BMBT_STARTBLOCK_BITLEN)));

	/* Unaligned stores: the record may live at any offset in a block. */
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)extent_flag << 63) |
		 ((xfs_bmbt_rec_base_t)s->br_startoff << 9) |
		 ((xfs_bmbt_rec_base_t)s->br_startblock >> 43), &r->l0);
	put_unaligned_be64(
		((xfs_bmbt_rec_base_t)s->br_startblock << 21) |
		 ((xfs_bmbt_rec_base_t)s->br_blockcount &
		  (xfs_bmbt_rec_base_t)xfs_mask64lo(21)), &r->l1);
}
  115. /*
  116. * Convert in-memory form of btree root to on-disk form.
  117. */
/*
 * Convert the in-memory (bmbt) form of a btree root to the on-disk (bmdr,
 * inode-fork-root) form. Mirror image of xfs_bmdr_to_bmbt().
 *
 * @mp:        mount point
 * @rblock:    source in-memory root block
 * @rblocklen: size of the in-memory root, in bytes
 * @dblock:    destination on-disk root block
 * @dblocklen: size of the on-disk root, in bytes
 */
void
xfs_bmbt_to_bmdr(
	struct xfs_mount	*mp,
	struct xfs_btree_block	*rblock,
	int			rblocklen,
	xfs_bmdr_block_t	*dblock,
	int			dblocklen)
{
	int			dmxr;
	xfs_bmbt_key_t		*fkp;	/* "from" keys (in-memory) */
	__be64			*fpp;	/* "from" pointers (in-memory) */
	xfs_bmbt_key_t		*tkp;	/* "to" keys (on-disk) */
	__be64			*tpp;	/* "to" pointers (on-disk) */

	/* Sanity-check the in-memory root before flattening it. */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_CRC_MAGIC));
		ASSERT(uuid_equal(&rblock->bb_u.l.bb_uuid,
		       &mp->m_sb.sb_meta_uuid));
		ASSERT(rblock->bb_u.l.bb_blkno ==
		       cpu_to_be64(XFS_BUF_DADDR_NULL));
	} else
		ASSERT(rblock->bb_magic == cpu_to_be32(XFS_BMAP_MAGIC));
	/* A root has no siblings and is never a leaf. */
	ASSERT(rblock->bb_u.l.bb_leftsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_u.l.bb_rightsib == cpu_to_be64(NULLFSBLOCK));
	ASSERT(rblock->bb_level != 0);

	dblock->bb_level = rblock->bb_level;
	dblock->bb_numrecs = rblock->bb_numrecs;

	/* dmxr first holds the on-disk capacity, used to locate the ptrs... */
	dmxr = xfs_bmdr_maxrecs(dblocklen, 0);
	fkp = XFS_BMBT_KEY_ADDR(mp, rblock, 1);
	tkp = XFS_BMDR_KEY_ADDR(dblock, 1);
	fpp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, rblocklen);
	tpp = XFS_BMDR_PTR_ADDR(dblock, 1, dmxr);
	/* ...then is reused as the number of records actually copied. */
	dmxr = be16_to_cpu(dblock->bb_numrecs);
	memcpy(tkp, fkp, sizeof(*fkp) * dmxr);
	memcpy(tpp, fpp, sizeof(*fpp) * dmxr);
}
  153. STATIC struct xfs_btree_cur *
  154. xfs_bmbt_dup_cursor(
  155. struct xfs_btree_cur *cur)
  156. {
  157. struct xfs_btree_cur *new;
  158. new = xfs_bmbt_init_cursor(cur->bc_mp, cur->bc_tp,
  159. cur->bc_private.b.ip, cur->bc_private.b.whichfork);
  160. /*
  161. * Copy the firstblock, dfops, and flags values,
  162. * since init cursor doesn't get them.
  163. */
  164. new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
  165. new->bc_private.b.dfops = cur->bc_private.b.dfops;
  166. new->bc_private.b.flags = cur->bc_private.b.flags;
  167. return new;
  168. }
  169. STATIC void
  170. xfs_bmbt_update_cursor(
  171. struct xfs_btree_cur *src,
  172. struct xfs_btree_cur *dst)
  173. {
  174. ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
  175. (dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
  176. ASSERT(dst->bc_private.b.dfops == src->bc_private.b.dfops);
  177. dst->bc_private.b.allocated += src->bc_private.b.allocated;
  178. dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
  179. src->bc_private.b.allocated = 0;
  180. }
/*
 * Allocate one new block for the bmap btree.
 *
 * Returns 0 with *stat == 1 and the new block number in @new on success,
 * 0 with *stat == 0 if no space could be found, or a negative errno.
 */
STATIC int
xfs_bmbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	xfs_alloc_arg_t		args;	/* block allocation args */
	int			error;	/* error return value */

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.fsbno = cur->bc_private.b.firstblock;
	args.firstblock = args.fsbno;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
			cur->bc_private.b.whichfork);

	if (args.fsbno == NULLFSBLOCK) {
		/* No prior allocation this transaction: aim near @start. */
		args.fsbno = be64_to_cpu(start->l);
		args.type = XFS_ALLOCTYPE_START_BNO;
		/*
		 * Make sure there is sufficient room left in the AG to
		 * complete a full tree split for an extent insert. If
		 * we are converting the middle part of an extent then
		 * we may need space for two tree splits.
		 *
		 * We are relying on the caller to make the correct block
		 * reservation for this operation to succeed. If the
		 * reservation amount is insufficient then we may fail a
		 * block allocation here and corrupt the filesystem.
		 */
		args.minleft = args.tp->t_blk_res;
	} else if (cur->bc_private.b.dfops->dop_low) {
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}

	/* Btree blocks are allocated one at a time. */
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL;
	if (!args.wasdel && args.tp->t_blk_res == 0) {
		error = -ENOSPC;
		goto error0;
	}

	error = xfs_alloc_vextent(&args);
	if (error)
		goto error0;

	if (args.fsbno == NULLFSBLOCK && args.minleft) {
		/*
		 * Could not find an AG with enough free space to satisfy
		 * a full btree split. Try again and if
		 * successful activate the lowspace algorithm.
		 */
		args.fsbno = 0;
		args.type = XFS_ALLOCTYPE_FIRST_AG;
		error = xfs_alloc_vextent(&args);
		if (error)
			goto error0;
		cur->bc_private.b.dfops->dop_low = true;
	}
	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		*stat = 0;
		return 0;
	}

	ASSERT(args.len == 1);
	/* Remember where we allocated so later allocations stay nearby. */
	cur->bc_private.b.firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	cur->bc_private.b.ip->i_d.di_nblocks++;
	xfs_trans_log_inode(args.tp, cur->bc_private.b.ip, XFS_ILOG_CORE);
	xfs_trans_mod_dquot_byino(args.tp, cur->bc_private.b.ip,
			XFS_TRANS_DQ_BCOUNT, 1L);

	new->l = cpu_to_be64(args.fsbno);
	*stat = 1;
	return 0;

error0:
	return error;
}
  256. STATIC int
  257. xfs_bmbt_free_block(
  258. struct xfs_btree_cur *cur,
  259. struct xfs_buf *bp)
  260. {
  261. struct xfs_mount *mp = cur->bc_mp;
  262. struct xfs_inode *ip = cur->bc_private.b.ip;
  263. struct xfs_trans *tp = cur->bc_tp;
  264. xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
  265. struct xfs_owner_info oinfo;
  266. xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
  267. xfs_bmap_add_free(mp, cur->bc_private.b.dfops, fsbno, 1, &oinfo);
  268. ip->i_d.di_nblocks--;
  269. xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
  270. xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
  271. return 0;
  272. }
  273. STATIC int
  274. xfs_bmbt_get_minrecs(
  275. struct xfs_btree_cur *cur,
  276. int level)
  277. {
  278. if (level == cur->bc_nlevels - 1) {
  279. struct xfs_ifork *ifp;
  280. ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
  281. cur->bc_private.b.whichfork);
  282. return xfs_bmbt_maxrecs(cur->bc_mp,
  283. ifp->if_broot_bytes, level == 0) / 2;
  284. }
  285. return cur->bc_mp->m_bmap_dmnr[level != 0];
  286. }
  287. int
  288. xfs_bmbt_get_maxrecs(
  289. struct xfs_btree_cur *cur,
  290. int level)
  291. {
  292. if (level == cur->bc_nlevels - 1) {
  293. struct xfs_ifork *ifp;
  294. ifp = XFS_IFORK_PTR(cur->bc_private.b.ip,
  295. cur->bc_private.b.whichfork);
  296. return xfs_bmbt_maxrecs(cur->bc_mp,
  297. ifp->if_broot_bytes, level == 0);
  298. }
  299. return cur->bc_mp->m_bmap_dmxr[level != 0];
  300. }
  301. /*
  302. * Get the maximum records we could store in the on-disk format.
  303. *
  304. * For non-root nodes this is equivalent to xfs_bmbt_get_maxrecs, but
  305. * for the root node this checks the available space in the dinode fork
  306. * so that we can resize the in-memory buffer to match it. After a
  307. * resize to the maximum size this function returns the same value
  308. * as xfs_bmbt_get_maxrecs for the root node, too.
  309. */
  310. STATIC int
  311. xfs_bmbt_get_dmaxrecs(
  312. struct xfs_btree_cur *cur,
  313. int level)
  314. {
  315. if (level != cur->bc_nlevels - 1)
  316. return cur->bc_mp->m_bmap_dmxr[level != 0];
  317. return xfs_bmdr_maxrecs(cur->bc_private.b.forksize, level == 0);
  318. }
/*
 * Initialize a (low) btree key from an on-disk record: the key is the
 * extent's starting file offset, stored big-endian.
 */
STATIC void
xfs_bmbt_init_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff =
		cpu_to_be64(xfs_bmbt_disk_get_startoff(&rec->bmbt));
}
/*
 * Initialize the high btree key from a record: the last file offset the
 * extent covers, i.e. startoff + blockcount - 1.
 */
STATIC void
xfs_bmbt_init_high_key_from_rec(
	union xfs_btree_key	*key,
	union xfs_btree_rec	*rec)
{
	key->bmbt.br_startoff = cpu_to_be64(
		xfs_bmbt_disk_get_startoff(&rec->bmbt) +
		xfs_bmbt_disk_get_blockcount(&rec->bmbt) - 1);
}
/* Pack the cursor's in-memory irec into the on-disk record format. */
STATIC void
xfs_bmbt_init_rec_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_rec	*rec)
{
	xfs_bmbt_disk_set_all(&rec->bmbt, &cur->bc_rec.b);
}
/*
 * The bmbt root lives in the inode fork (XFS_BTREE_ROOT_IN_INODE is set
 * on the cursor), so there is no on-disk root pointer; return zero.
 */
STATIC void
xfs_bmbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	ptr->l = 0;
}
/*
 * Key comparison for lookups: the on-disk key's startoff minus the
 * cursor's lookup startoff, giving a <0 / 0 / >0 ordering result.
 */
STATIC int64_t
xfs_bmbt_key_diff(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*key)
{
	return (int64_t)be64_to_cpu(key->bmbt.br_startoff) -
				      cur->bc_rec.b.br_startoff;
}
/* Compare two btree keys: k1's startoff minus k2's startoff. */
STATIC int64_t
xfs_bmbt_diff_two_keys(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*k1,
	union xfs_btree_key	*k2)
{
	return (int64_t)be64_to_cpu(k1->bmbt.br_startoff) -
			  be64_to_cpu(k2->bmbt.br_startoff);
}
/*
 * Structural verifier for a bmbt block: check the magic number (v5 CRC
 * or pre-v5), the v5 header where applicable, and the block level.
 * Returns NULL if the block looks sane, or the failing address.
 */
static xfs_failaddr_t
xfs_bmbt_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	xfs_failaddr_t		fa;
	unsigned int		level;

	/* Magic is stored big-endian; compare against be32 constants. */
	switch (block->bb_magic) {
	case cpu_to_be32(XFS_BMAP_CRC_MAGIC):
		/*
		 * XXX: need a better way of verifying the owner here. Right now
		 * just make sure there has been one set.
		 */
		fa = xfs_btree_lblock_v5hdr_verify(bp, XFS_RMAP_OWN_UNKNOWN);
		if (fa)
			return fa;
		/* fall through */
	case cpu_to_be32(XFS_BMAP_MAGIC):
		break;
	default:
		return __this_address;
	}

	/*
	 * numrecs and level verification.
	 *
	 * We don't know what fork we belong to, so just verify that the level
	 * is less than the maximum of the two. Later checks will be more
	 * precise.
	 */
	level = be16_to_cpu(block->bb_level);
	if (level > max(mp->m_bm_maxlevels[0], mp->m_bm_maxlevels[1]))
		return __this_address;
	return xfs_btree_lblock_verify(bp, mp->m_bmap_dmxr[level != 0]);
}
  402. static void
  403. xfs_bmbt_read_verify(
  404. struct xfs_buf *bp)
  405. {
  406. xfs_failaddr_t fa;
  407. if (!xfs_btree_lblock_verify_crc(bp))
  408. xfs_verifier_error(bp, -EFSBADCRC, __this_address);
  409. else {
  410. fa = xfs_bmbt_verify(bp);
  411. if (fa)
  412. xfs_verifier_error(bp, -EFSCORRUPTED, fa);
  413. }
  414. if (bp->b_error)
  415. trace_xfs_btree_corrupt(bp, _RET_IP_);
  416. }
  417. static void
  418. xfs_bmbt_write_verify(
  419. struct xfs_buf *bp)
  420. {
  421. xfs_failaddr_t fa;
  422. fa = xfs_bmbt_verify(bp);
  423. if (fa) {
  424. trace_xfs_btree_corrupt(bp, _RET_IP_);
  425. xfs_verifier_error(bp, -EFSCORRUPTED, fa);
  426. return;
  427. }
  428. xfs_btree_lblock_calc_crc(bp);
  429. }
/* Buffer verifier operations for bmbt blocks. */
const struct xfs_buf_ops xfs_bmbt_buf_ops = {
	.name = "xfs_bmbt",
	.verify_read = xfs_bmbt_read_verify,
	.verify_write = xfs_bmbt_write_verify,
	.verify_struct = xfs_bmbt_verify,
};
  436. STATIC int
  437. xfs_bmbt_keys_inorder(
  438. struct xfs_btree_cur *cur,
  439. union xfs_btree_key *k1,
  440. union xfs_btree_key *k2)
  441. {
  442. return be64_to_cpu(k1->bmbt.br_startoff) <
  443. be64_to_cpu(k2->bmbt.br_startoff);
  444. }
  445. STATIC int
  446. xfs_bmbt_recs_inorder(
  447. struct xfs_btree_cur *cur,
  448. union xfs_btree_rec *r1,
  449. union xfs_btree_rec *r2)
  450. {
  451. return xfs_bmbt_disk_get_startoff(&r1->bmbt) +
  452. xfs_bmbt_disk_get_blockcount(&r1->bmbt) <=
  453. xfs_bmbt_disk_get_startoff(&r2->bmbt);
  454. }
/* Operations vector wiring the bmap btree into the generic btree code. */
static const struct xfs_btree_ops xfs_bmbt_ops = {
	.rec_len		= sizeof(xfs_bmbt_rec_t),
	.key_len		= sizeof(xfs_bmbt_key_t),

	.dup_cursor		= xfs_bmbt_dup_cursor,
	.update_cursor		= xfs_bmbt_update_cursor,
	.alloc_block		= xfs_bmbt_alloc_block,
	.free_block		= xfs_bmbt_free_block,
	.get_maxrecs		= xfs_bmbt_get_maxrecs,
	.get_minrecs		= xfs_bmbt_get_minrecs,
	.get_dmaxrecs		= xfs_bmbt_get_dmaxrecs,
	.init_key_from_rec	= xfs_bmbt_init_key_from_rec,
	.init_high_key_from_rec	= xfs_bmbt_init_high_key_from_rec,
	.init_rec_from_cur	= xfs_bmbt_init_rec_from_cur,
	.init_ptr_from_cur	= xfs_bmbt_init_ptr_from_cur,
	.key_diff		= xfs_bmbt_key_diff,
	.diff_two_keys		= xfs_bmbt_diff_two_keys,
	.buf_ops		= &xfs_bmbt_buf_ops,
	.keys_inorder		= xfs_bmbt_keys_inorder,
	.recs_inorder		= xfs_bmbt_recs_inorder,
};
  475. /*
  476. * Allocate a new bmap btree cursor.
  477. */
/*
 * Allocate a new bmap btree cursor for @whichfork of @ip.
 *
 * firstblock and dfops start out as NULLFSBLOCK/NULL here; callers that
 * allocate or free blocks set them up afterwards (see xfs_bmbt_dup_cursor).
 */
struct xfs_btree_cur *				/* new bmap btree cursor */
xfs_bmbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* inode owning the btree */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_btree_cur	*cur;

	/* bmbt cursors are never used for the CoW fork. */
	ASSERT(whichfork != XFS_COW_FORK);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	/* nlevels counts the root stored in the inode fork as a level. */
	cur->bc_nlevels = be16_to_cpu(ifp->if_broot->bb_level) + 1;
	cur->bc_btnum = XFS_BTNUM_BMAP;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_bmbt_2);

	cur->bc_ops = &xfs_bmbt_ops;
	cur->bc_flags = XFS_BTREE_LONG_PTRS | XFS_BTREE_ROOT_IN_INODE;
	if (xfs_sb_version_hascrc(&mp->m_sb))
		cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;

	cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
	cur->bc_private.b.ip = ip;
	cur->bc_private.b.firstblock = NULLFSBLOCK;
	cur->bc_private.b.dfops = NULL;
	cur->bc_private.b.allocated = 0;
	cur->bc_private.b.flags = 0;
	cur->bc_private.b.whichfork = whichfork;

	return cur;
}
  508. /*
  509. * Calculate number of records in a bmap btree block.
  510. */
  511. int
  512. xfs_bmbt_maxrecs(
  513. struct xfs_mount *mp,
  514. int blocklen,
  515. int leaf)
  516. {
  517. blocklen -= XFS_BMBT_BLOCK_LEN(mp);
  518. if (leaf)
  519. return blocklen / sizeof(xfs_bmbt_rec_t);
  520. return blocklen / (sizeof(xfs_bmbt_key_t) + sizeof(xfs_bmbt_ptr_t));
  521. }
  522. /*
  523. * Calculate number of records in a bmap btree inode root.
  524. */
  525. int
  526. xfs_bmdr_maxrecs(
  527. int blocklen,
  528. int leaf)
  529. {
  530. blocklen -= sizeof(xfs_bmdr_block_t);
  531. if (leaf)
  532. return blocklen / sizeof(xfs_bmdr_rec_t);
  533. return blocklen / (sizeof(xfs_bmdr_key_t) + sizeof(xfs_bmdr_ptr_t));
  534. }
  535. /*
 * Change the owner of a btree format fork of the inode passed in. Change it to
 * the owner that is passed in so that we can change owners before or after we
 * switch forks between inodes. The operation that the caller is doing will
 * determine whether it needs to change owner before or after the switch.
  540. *
  541. * For demand paged transactional modification, the fork switch should be done
  542. * after reading in all the blocks, modifying them and pinning them in the
  543. * transaction. For modification when the buffers are already pinned in memory,
  544. * the fork switch can be done before changing the owner as we won't need to
  545. * validate the owner until the btree buffers are unpinned and writes can occur
  546. * again.
  547. *
  548. * For recovery based ownership change, there is no transactional context and
  549. * so a buffer list must be supplied so that we can record the buffers that we
  550. * modified for the caller to issue IO on.
  551. */
/*
 * Change the recorded owner of every block in a btree-format fork to
 * @new_owner.
 *
 * Exactly one of @tp (transactional change) or @buffer_list (recovery:
 * collect modified buffers for the caller to write) must be supplied.
 * Returns 0 on success or a negative errno.
 */
int
xfs_bmbt_change_owner(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_ino_t		new_owner,
	struct list_head	*buffer_list)
{
	struct xfs_btree_cur	*cur;
	int			error;

	ASSERT(tp || buffer_list);
	ASSERT(!(tp && buffer_list));
	/* The chosen fork must actually be in btree format. */
	if (whichfork == XFS_DATA_FORK)
		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_BTREE);
	else
		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE);

	cur = xfs_bmbt_init_cursor(ip->i_mount, tp, ip, whichfork);
	if (!cur)
		return -ENOMEM;
	/* Blocks still carry the old owner; suppress owner verification. */
	cur->bc_private.b.flags |= XFS_BTCUR_BPRV_INVALID_OWNER;

	error = xfs_btree_change_owner(cur, new_owner, buffer_list);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
	return error;
}
  576. /* Calculate the bmap btree size for some records. */
/*
 * Calculate the bmap btree size for some records, sized using the
 * per-level minimum record counts (m_bmap_dmnr).
 */
unsigned long long
xfs_bmbt_calc_size(
	struct xfs_mount	*mp,
	unsigned long long	len)
{
	return xfs_btree_calc_size(mp->m_bmap_dmnr, len);
}