xfs_defer.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trace.h"

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates.  Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions.  XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items.  Each pending
 * item tracks one type of deferred work.  Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process.  To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order.  XFS locking order rules
 *     require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish_item
 *       wants a new transaction; see the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed.  With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation.  The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off.  The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; xfs_defer_finish
 * ensures that this happens.
 *
 * This requires some coordination between ->finish_item and
 * xfs_defer_finish.  Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work.  Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * xfs_defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_work queue.  Then it rolls the
 * transaction and picks up processing where it left off.
 * ->finish_item must be careful to leave enough transaction
 * reservation to fit the new log intent item.
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * recur.
 */

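/*
 * Example usage, from a caller's point of view.  This is a minimal
 * sketch rather than code lifted from any one caller: transaction
 * allocation and the metadata update itself are elided, and
 * xfs_example_update() is a hypothetical stand-in for work that
 * queues deferred items.
 *
 *	struct xfs_defer_ops	dfops;
 *	xfs_fsblock_t		firstblock;
 *	int			error;
 *
 *	xfs_defer_init(&dfops, &firstblock);
 *	error = xfs_example_update(tp, &dfops, &firstblock);
 *	if (error)
 *		goto out_cancel;
 *	error = xfs_defer_finish(&tp, &dfops);
 *	if (error)
 *		goto out_cancel;
 *	return xfs_trans_commit(tp);
 *
 * out_cancel:
 *	xfs_defer_cancel(&dfops);
 *	xfs_trans_cancel(tp);
 *	return error;
 */
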
static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 */
STATIC void
xfs_defer_intake_work(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop)
{
	struct list_head		*li;
	struct xfs_defer_pending	*dfp;

	list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
		dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
				dfp->dfp_count);
		trace_xfs_defer_intake_work(tp->t_mountp, dfp);
		list_sort(tp->t_mountp, &dfp->dfp_work,
				dfp->dfp_type->diff_items);
		list_for_each(li, &dfp->dfp_work)
			dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
	}

	list_splice_tail_init(&dop->dop_intake, &dop->dop_pending);
}

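/*
 * Example: the ->diff_items callback used by the list_sort() call above
 * must order work items by AG.  A sketch modeled on the extent-free
 * type's comparator; treat the xfs_extent_free_item layout here as
 * illustrative:
 *
 *	static int
 *	xfs_extent_free_diff_items(
 *		void			*priv,
 *		struct list_head	*a,
 *		struct list_head	*b)
 *	{
 *		struct xfs_mount		*mp = priv;
 *		struct xfs_extent_free_item	*ra;
 *		struct xfs_extent_free_item	*rb;
 *
 *		ra = container_of(a, struct xfs_extent_free_item, xefi_list);
 *		rb = container_of(b, struct xfs_extent_free_item, xefi_list);
 *		return XFS_FSB_TO_AGNO(mp, ra->xefi_startblock) -
 *		       XFS_FSB_TO_AGNO(mp, rb->xefi_startblock);
 *	}
 */
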
/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct xfs_defer_ops		*dop,
	int				error)
{
	struct xfs_defer_pending	*dfp;

	trace_xfs_defer_trans_abort(tp->t_mountp, dop, _RET_IP_);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			dfp->dfp_type->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}

	/* Shut down FS. */
	xfs_force_shutdown(tp->t_mountp, (error == -EFSCORRUPTED) ?
			SHUTDOWN_CORRUPT_INCORE : SHUTDOWN_META_IO_ERROR);
}

/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tp,
	struct xfs_defer_ops		*dop)
{
	int				i;
	int				error;

	/* Log all the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);

	/* Hold the (previously bjoin'd) buffer locked across the roll. */
	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++)
		xfs_trans_dirty_buf(*tp, dop->dop_bufs[i]);

	trace_xfs_defer_trans_roll((*tp)->t_mountp, dop, _RET_IP_);

	/* Roll the transaction. */
	error = xfs_trans_roll(tp);
	if (error) {
		trace_xfs_defer_trans_roll_error((*tp)->t_mountp, dop, error);
		xfs_defer_trans_abort(*tp, dop, error);
		return error;
	}
	dop->dop_committed = true;

	/* Rejoin the joined inodes. */
	for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++)
		xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS && dop->dop_bufs[i]; i++) {
		xfs_trans_bjoin(*tp, dop->dop_bufs[i]);
		xfs_trans_bhold(*tp, dop->dop_bufs[i]);
	}

	return error;
}

/* Do we have any work items to finish? */
bool
xfs_defer_has_unfinished_work(
	struct xfs_defer_ops		*dop)
{
	return !list_empty(&dop->dop_pending) || !list_empty(&dop->dop_intake);
}

/*
 * Add this inode to the deferred op.  Each joined inode is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_ijoin(
	struct xfs_defer_ops		*dop,
	struct xfs_inode		*ip)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_INODES; i++) {
		if (dop->dop_inodes[i] == ip)
			return 0;
		else if (dop->dop_inodes[i] == NULL) {
			dop->dop_inodes[i] = ip;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}

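/*
 * Example: a caller that needs an inode to stay attached across the
 * transaction rolls in xfs_defer_finish joins it once; it is then
 * relogged and rejoined on every roll.  A sketch (locking and error
 * handling elided):
 *
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_defer_ijoin(&dfops, ip);
 */
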
/*
 * Add this buffer to the deferred op.  Each joined buffer is relogged
 * each time we roll the transaction.
 */
int
xfs_defer_bjoin(
	struct xfs_defer_ops		*dop,
	struct xfs_buf			*bp)
{
	int				i;

	for (i = 0; i < XFS_DEFER_OPS_NR_BUFS; i++) {
		if (dop->dop_bufs[i] == bp)
			return 0;
		else if (dop->dop_bufs[i] == NULL) {
			dop->dop_bufs[i] = bp;
			return 0;
		}
	}

	ASSERT(0);
	return -EFSCORRUPTED;
}

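/*
 * Example: a buffer that must stay locked across the rolls is held in
 * the transaction and joined to the dfops, which redirties and reholds
 * it on each roll.  A sketch (how the buffer was read and joined to
 * the transaction is elided):
 *
 *	xfs_trans_bhold(tp, bp);
 *	error = xfs_defer_bjoin(&dfops, bp);
 */
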
/*
 * Finish all the pending work.  This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 *
 * If an inode is provided, relog it to the new transaction.
 */
int
xfs_defer_finish(
	struct xfs_trans		**tp,
	struct xfs_defer_ops		*dop)
{
	struct xfs_defer_pending	*dfp;
	struct list_head		*li;
	struct list_head		*n;
	void				*state;
	int				error = 0;
	void				(*cleanup_fn)(struct xfs_trans *, void *, int);
	struct xfs_defer_ops		*orig_dop;

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish((*tp)->t_mountp, dop, _RET_IP_);

	/*
	 * Attach dfops to the transaction during deferred ops processing. This
	 * explicitly causes calls into the allocator to defer AGFL block frees.
	 * Note that this code can go away once all dfops users attach to the
	 * associated tp.
	 */
	ASSERT(!(*tp)->t_agfl_dfops || ((*tp)->t_agfl_dfops == dop));
	orig_dop = (*tp)->t_agfl_dfops;
	(*tp)->t_agfl_dfops = dop;

	/* Until we run out of pending work to finish... */
	while (xfs_defer_has_unfinished_work(dop)) {
		/* Log intents for work items sitting in the intake. */
		xfs_defer_intake_work(*tp, dop);

		/* Roll the transaction. */
		error = xfs_defer_trans_roll(tp, dop);
		if (error)
			goto out;

		/* Log an intent-done item for the first pending item. */
		dfp = list_first_entry(&dop->dop_pending,
				struct xfs_defer_pending, dfp_list);
		trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
		dfp->dfp_done = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
				dfp->dfp_count);
		cleanup_fn = dfp->dfp_type->finish_cleanup;

		/* Finish the work items. */
		state = NULL;
		list_for_each_safe(li, n, &dfp->dfp_work) {
			list_del(li);
			dfp->dfp_count--;
			error = dfp->dfp_type->finish_item(*tp, dop, li,
					dfp->dfp_done, &state);
			if (error == -EAGAIN) {
				/*
				 * Caller wants a fresh transaction;
				 * put the work item back on the list
				 * and jump out.
				 */
				list_add(li, &dfp->dfp_work);
				dfp->dfp_count++;
				break;
			} else if (error) {
				/*
				 * Clean up after ourselves and jump out.
				 * xfs_defer_cancel will take care of freeing
				 * all these lists and stuff.
				 */
				if (cleanup_fn)
					cleanup_fn(*tp, state, error);
				xfs_defer_trans_abort(*tp, dop, error);
				goto out;
			}
		}
		if (error == -EAGAIN) {
			/*
			 * Caller wants a fresh transaction, so log a
			 * new log intent item to replace the old one
			 * and roll the transaction.  See "Requesting
			 * a Fresh Transaction while Finishing
			 * Deferred Work" above.
			 */
			dfp->dfp_intent = dfp->dfp_type->create_intent(*tp,
					dfp->dfp_count);
			dfp->dfp_done = NULL;
			list_for_each(li, &dfp->dfp_work)
				dfp->dfp_type->log_item(*tp, dfp->dfp_intent,
						li);
		} else {
			/* Done with the dfp, free it. */
			list_del(&dfp->dfp_list);
			kmem_free(dfp);
		}

		if (cleanup_fn)
			cleanup_fn(*tp, state, error);
	}

out:
	(*tp)->t_agfl_dfops = orig_dop;

	if (error)
		trace_xfs_defer_finish_error((*tp)->t_mountp, dop, error);
	else
		trace_xfs_defer_finish_done((*tp)->t_mountp, dop, _RET_IP_);

	return error;
}

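/*
 * Example: a ->finish_item implementation that requests a continuation.
 * A hypothetical sketch (xfs_example_item and xfs_example_do_work are
 * stand-ins, not real helpers): on partial progress the worker trims
 * the work item to the unfinished remainder, fixes up the done item's
 * count, and returns -EAGAIN so the loop above relogs the intent and
 * rolls the transaction; the item is requeued rather than freed.
 *
 *	STATIC int
 *	xfs_example_finish_item(
 *		struct xfs_trans	*tp,
 *		struct xfs_defer_ops	*dop,
 *		struct list_head	*item,
 *		void			*done_item,
 *		void			**state)
 *	{
 *		struct xfs_example_item	*ex;
 *		int			error;
 *
 *		ex = container_of(item, struct xfs_example_item, ei_list);
 *		error = xfs_example_do_work(tp, dop, ex, done_item);
 *		if (error == -EAGAIN)
 *			return -EAGAIN;
 *		kmem_free(ex);
 *		return error;
 *	}
 */
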
/*
 * Free up any items left in the list.
 */
void
xfs_defer_cancel(
	struct xfs_defer_ops		*dop)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;

	trace_xfs_defer_cancel(NULL, dop, _RET_IP_);

	/*
	 * Free the pending items.  Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, &dop->dop_intake, dfp_list) {
		trace_xfs_defer_intake_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
	list_for_each_entry_safe(dfp, pli, &dop->dop_pending, dfp_list) {
		trace_xfs_defer_pending_cancel(NULL, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			dfp->dfp_type->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_free(dfp);
	}
}

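/*
 * Example: the ->cancel_item callback invoked above simply tears down
 * the work item.  A sketch in the style of the extent-free type:
 *
 *	STATIC void
 *	xfs_extent_free_cancel_item(
 *		struct list_head	*item)
 *	{
 *		struct xfs_extent_free_item	*free;
 *
 *		free = container_of(item, struct xfs_extent_free_item,
 *				xefi_list);
 *		kmem_free(free);
 *	}
 */
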
/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_defer_ops		*dop,
	enum xfs_defer_ops_type		type,
	struct list_head		*li)
{
	struct xfs_defer_pending	*dfp = NULL;

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it.  Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&dop->dop_intake)) {
		dfp = list_last_entry(&dop->dop_intake,
				struct xfs_defer_pending, dfp_list);
		if (dfp->dfp_type->type != type ||
		    (dfp->dfp_type->max_items &&
		     dfp->dfp_count >= dfp->dfp_type->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
				KM_SLEEP | KM_NOFS);
		dfp->dfp_type = defer_op_types[type];
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &dop->dop_intake);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}

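/*
 * Example: queueing an extent free.  A sketch modeled on the bmap
 * code's helper; the zone and item layout mirror the real extent-free
 * item but are illustrative here:
 *
 *	struct xfs_extent_free_item	*new;
 *
 *	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
 *	new->xefi_startblock = bno;
 *	new->xefi_blockcount = (xfs_extlen_t)len;
 *	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
 */
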
/* Register a deferred operation type. */
void
xfs_defer_init_op_type(
	const struct xfs_defer_op_type	*type)
{
	defer_op_types[type->type] = type;
}

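/*
 * Example: wiring up an op type at init time.  A condensed sketch of
 * the extent-free type; each callback implements one piece of the
 * machinery above (sorting, intent/done logging, finishing, cancel):
 *
 *	static const struct xfs_defer_op_type xfs_extent_free_defer_type = {
 *		.type		= XFS_DEFER_OPS_TYPE_FREE,
 *		.max_items	= XFS_EFI_MAX_FAST_EXTENTS,
 *		.diff_items	= xfs_extent_free_diff_items,
 *		.create_intent	= xfs_extent_free_create_intent,
 *		.abort_intent	= xfs_extent_free_abort_intent,
 *		.log_item	= xfs_extent_free_log_item,
 *		.create_done	= xfs_extent_free_create_done,
 *		.finish_item	= xfs_extent_free_finish_item,
 *		.cancel_item	= xfs_extent_free_cancel_item,
 *	};
 *
 *	xfs_defer_init_op_type(&xfs_extent_free_defer_type);
 */
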
/* Initialize a deferred operation. */
void
xfs_defer_init(
	struct xfs_defer_ops		*dop,
	xfs_fsblock_t			*fbp)
{
	memset(dop, 0, sizeof(struct xfs_defer_ops));
	*fbp = NULLFSBLOCK;
	INIT_LIST_HEAD(&dop->dop_intake);
	INIT_LIST_HEAD(&dop->dop_pending);
	trace_xfs_defer_init(NULL, dop, _RET_IP_);
}