dm-space-map-metadata.c

/*
 * Copyright (C) 2011 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-space-map.h"
#include "dm-space-map-common.h"
#include "dm-space-map-metadata.h"

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "space map metadata"

/*----------------------------------------------------------------*/
/*
 * An edge triggered threshold.
 */
struct threshold {
	bool threshold_set;
	bool value_set;
	dm_block_t threshold;
	dm_block_t current_value;
	dm_sm_threshold_fn fn;
	void *context;
};

static void threshold_init(struct threshold *t)
{
	t->threshold_set = false;
	t->value_set = false;
}

static void set_threshold(struct threshold *t, dm_block_t value,
			  dm_sm_threshold_fn fn, void *context)
{
	t->threshold_set = true;
	t->threshold = value;
	t->fn = fn;
	t->context = context;
}

static bool below_threshold(struct threshold *t, dm_block_t value)
{
	return t->threshold_set && value <= t->threshold;
}

static bool threshold_already_triggered(struct threshold *t)
{
	return t->value_set && below_threshold(t, t->current_value);
}

static void check_threshold(struct threshold *t, dm_block_t value)
{
	if (below_threshold(t, value) &&
	    !threshold_already_triggered(t))
		t->fn(t->context);

	t->value_set = true;
	t->current_value = value;
}
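/*
 * For example, with a threshold of 100 a sequence of check_threshold()
 * calls for the values 150, 90, 80, 120, 95 invokes fn() only for the 90
 * and the 95, i.e. once per downward crossing of the threshold rather
 * than for every value at or below it.
 */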
/*----------------------------------------------------------------*/

/*
 * Space map interface.
 *
 * The low level disk format is written using the standard btree and
 * transaction manager.  This means that performing disk operations may
 * cause us to recurse into the space map in order to allocate new blocks.
 * For this reason we have a pool of pre-allocated blocks large enough to
 * service any metadata_ll_disk operation.
 */

/*
 * FIXME: we should calculate this based on the size of the device.
 * Only the metadata space map needs this functionality.
 */
#define MAX_RECURSIVE_ALLOCATIONS 1024

enum block_op_type {
	BOP_INC,
	BOP_DEC
};

struct block_op {
	enum block_op_type type;
	dm_block_t block;
};

struct bop_ring_buffer {
	unsigned begin;
	unsigned end;
	struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
};

static void brb_init(struct bop_ring_buffer *brb)
{
	brb->begin = 0;
	brb->end = 0;
}

static bool brb_empty(struct bop_ring_buffer *brb)
{
	return brb->begin == brb->end;
}

static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
{
	unsigned r = old + 1;

	return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
}

static int brb_push(struct bop_ring_buffer *brb,
		    enum block_op_type type, dm_block_t b)
{
	struct block_op *bop;
	unsigned next = brb_next(brb, brb->end);

	/*
	 * We don't allow the last bop to be filled, this way we can
	 * differentiate between full and empty.
	 */
	if (next == brb->begin)
		return -ENOMEM;

	bop = brb->bops + brb->end;
	bop->type = type;
	bop->block = b;

	brb->end = next;

	return 0;
}

static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
{
	struct block_op *bop;

	if (brb_empty(brb))
		return -ENODATA;

	bop = brb->bops + brb->begin;
	result->type = bop->type;
	result->block = bop->block;

	brb->begin = brb_next(brb, brb->begin);

	return 0;
}
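/*
 * The bops array has MAX_RECURSIVE_ALLOCATIONS + 1 slots, but one slot is
 * always kept unused so brb_push() can detect a full buffer (next ==
 * begin) without maintaining a separate element count.  Entries come back
 * out in FIFO order, e.g.:
 *
 *	brb_push(&brb, BOP_INC, 5);
 *	brb_push(&brb, BOP_DEC, 9);
 *	brb_pop(&brb, &op);	// op = { BOP_INC, 5 }
 */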
/*----------------------------------------------------------------*/

struct sm_metadata {
	struct dm_space_map sm;

	struct ll_disk ll;
	struct ll_disk old_ll;

	dm_block_t begin;

	unsigned recursion_count;
	unsigned allocated_this_transaction;
	struct bop_ring_buffer uncommitted;

	struct threshold threshold;
};

static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
	int r = brb_push(&smm->uncommitted, type, b);

	if (r) {
		DMERR("too many recursive allocations");
		return -ENOMEM;
	}

	return 0;
}

static int commit_bop(struct sm_metadata *smm, struct block_op *op)
{
	int r = 0;
	enum allocation_event ev;

	switch (op->type) {
	case BOP_INC:
		r = sm_ll_inc(&smm->ll, op->block, &ev);
		break;

	case BOP_DEC:
		r = sm_ll_dec(&smm->ll, op->block, &ev);
		break;
	}

	return r;
}

static void in(struct sm_metadata *smm)
{
	smm->recursion_count++;
}

static int out(struct sm_metadata *smm)
{
	int r = 0;

	/*
	 * If we're not recursing then very bad things are happening.
	 */
	if (!smm->recursion_count) {
		DMERR("lost track of recursion depth");
		return -ENOMEM;
	}

	if (smm->recursion_count == 1) {
		while (!brb_empty(&smm->uncommitted)) {
			struct block_op bop;

			r = brb_pop(&smm->uncommitted, &bop);
			if (r) {
				DMERR("bug in bop ring buffer");
				break;
			}

			r = commit_bop(smm, &bop);
			if (r)
				break;
		}
	}

	smm->recursion_count--;

	return r;
}

/*
 * When using the out() function above, we often want to combine an error
 * code for the operation run in the recursive context with that from
 * out().
 */
static int combine_errors(int r1, int r2)
{
	return r1 ? r1 : r2;
}
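/*
 * The usual shape of an operation that may recurse back into the space
 * map is therefore:
 *
 *	if (recursing(smm))
 *		r = add_bop(smm, BOP_INC, b);	<- defer until we unwind
 *	else {
 *		in(smm);
 *		r = sm_ll_inc(&smm->ll, b, &ev);
 *		r2 = out(smm);			<- replays any deferred bops
 *	}
 *	return combine_errors(r, r2);
 *
 * sm_metadata_inc_block() and sm_metadata_dec_block() below follow this
 * pattern exactly.
 */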
static int recursing(struct sm_metadata *smm)
{
	return smm->recursion_count;
}

static void sm_metadata_destroy(struct dm_space_map *sm)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	kfree(smm);
}

static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks;

	return 0;
}

static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->old_ll.nr_blocks - smm->old_ll.nr_allocated -
		 smm->allocated_this_transaction;

	return 0;
}

static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
				 uint32_t *result)
{
	int r;
	unsigned i;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	unsigned adjustment = 0;

	/*
	 * We may have some uncommitted adjustments to add.  This list
	 * should always be really short.
	 */
	for (i = smm->uncommitted.begin;
	     i != smm->uncommitted.end;
	     i = brb_next(&smm->uncommitted, i)) {
		struct block_op *op = smm->uncommitted.bops + i;

		if (op->block != b)
			continue;

		switch (op->type) {
		case BOP_INC:
			adjustment++;
			break;

		case BOP_DEC:
			adjustment--;
			break;
		}
	}

	r = sm_ll_lookup(&smm->ll, b, result);
	if (r)
		return r;

	*result += adjustment;

	return 0;
}
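/*
 * Note that the loop above walks the uncommitted ring buffer in place
 * (begin to end) rather than popping entries, so pending BOP_INC/BOP_DEC
 * operations are folded into the reported count without being consumed.
 */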
static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
					      dm_block_t b, int *result)
{
	int r, adjustment = 0;
	unsigned i;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	uint32_t rc;

	/*
	 * We may have some uncommitted adjustments to add.  This list
	 * should always be really short.
	 */
	for (i = smm->uncommitted.begin;
	     i != smm->uncommitted.end;
	     i = brb_next(&smm->uncommitted, i)) {
		struct block_op *op = smm->uncommitted.bops + i;

		if (op->block != b)
			continue;

		switch (op->type) {
		case BOP_INC:
			adjustment++;
			break;

		case BOP_DEC:
			adjustment--;
			break;
		}
	}

	if (adjustment > 1) {
		*result = 1;
		return 0;
	}

	r = sm_ll_lookup_bitmap(&smm->ll, b, &rc);
	if (r)
		return r;

	if (rc == 3)
		/*
		 * We err on the side of caution, and always return true.
		 */
		*result = 1;
	else
		*result = rc + adjustment > 1;

	return 0;
}
static int sm_metadata_set_count(struct dm_space_map *sm, dm_block_t b,
				 uint32_t count)
{
	int r, r2;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (smm->recursion_count) {
		DMERR("cannot recurse set_count()");
		return -EINVAL;
	}

	in(smm);
	r = sm_ll_insert(&smm->ll, b, count, &ev);
	r2 = out(smm);

	return combine_errors(r, r2);
}

static int sm_metadata_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	int r, r2 = 0;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (recursing(smm))
		r = add_bop(smm, BOP_INC, b);
	else {
		in(smm);
		r = sm_ll_inc(&smm->ll, b, &ev);
		r2 = out(smm);
	}

	return combine_errors(r, r2);
}

static int sm_metadata_dec_block(struct dm_space_map *sm, dm_block_t b)
{
	int r, r2 = 0;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	if (recursing(smm))
		r = add_bop(smm, BOP_DEC, b);
	else {
		in(smm);
		r = sm_ll_dec(&smm->ll, b, &ev);
		r2 = out(smm);
	}

	return combine_errors(r, r2);
}

static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b)
{
	int r, r2 = 0;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	r = sm_ll_find_free_block(&smm->old_ll, smm->begin, smm->old_ll.nr_blocks, b);
	if (r)
		return r;

	smm->begin = *b + 1;

	if (recursing(smm))
		r = add_bop(smm, BOP_INC, *b);
	else {
		in(smm);
		r = sm_ll_inc(&smm->ll, *b, &ev);
		r2 = out(smm);
	}

	if (!r)
		smm->allocated_this_transaction++;

	return combine_errors(r, r2);
}
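/*
 * sm_ll_find_free_block() is given old_ll - the state as of the last
 * commit - so blocks freed earlier in the current transaction are not
 * handed out again until after the next commit; smm->begin remembers how
 * far the search has got so subsequent allocations continue from there.
 */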
static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	dm_block_t count;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	int r = sm_metadata_new_block_(sm, b);
	if (r) {
		DMERR_LIMIT("unable to allocate new metadata block");
		return r;
	}

	r = sm_metadata_get_nr_free(sm, &count);
	if (r) {
		DMERR_LIMIT("couldn't get free block count");
		return r;
	}

	check_threshold(&smm->threshold, count);

	return r;
}

static int sm_metadata_commit(struct dm_space_map *sm)
{
	int r;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	r = sm_ll_commit(&smm->ll);
	if (r)
		return r;

	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
	smm->begin = 0;
	smm->allocated_this_transaction = 0;

	return 0;
}

static int sm_metadata_register_threshold_callback(struct dm_space_map *sm,
						    dm_block_t threshold,
						    dm_sm_threshold_fn fn,
						    void *context)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	set_threshold(&smm->threshold, threshold, fn, context);

	return 0;
}

static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result)
{
	*result = sizeof(struct disk_sm_root);

	return 0;
}

static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t max)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	struct disk_sm_root root_le;

	root_le.nr_blocks = cpu_to_le64(smm->ll.nr_blocks);
	root_le.nr_allocated = cpu_to_le64(smm->ll.nr_allocated);
	root_le.bitmap_root = cpu_to_le64(smm->ll.bitmap_root);
	root_le.ref_count_root = cpu_to_le64(smm->ll.ref_count_root);

	if (max < sizeof(root_le))
		return -ENOSPC;

	memcpy(where_le, &root_le, sizeof(root_le));

	return 0;
}

static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks);

static struct dm_space_map ops = {
	.destroy = sm_metadata_destroy,
	.extend = sm_metadata_extend,
	.get_nr_blocks = sm_metadata_get_nr_blocks,
	.get_nr_free = sm_metadata_get_nr_free,
	.get_count = sm_metadata_get_count,
	.count_is_more_than_one = sm_metadata_count_is_more_than_one,
	.set_count = sm_metadata_set_count,
	.inc_block = sm_metadata_inc_block,
	.dec_block = sm_metadata_dec_block,
	.new_block = sm_metadata_new_block,
	.commit = sm_metadata_commit,
	.root_size = sm_metadata_root_size,
	.copy_root = sm_metadata_copy_root,
	.register_threshold_callback = sm_metadata_register_threshold_callback
};
/*----------------------------------------------------------------*/

/*
 * When a new space map is created that manages its own space, we use
 * this tiny bootstrap allocator.
 */
static void sm_bootstrap_destroy(struct dm_space_map *sm)
{
}

static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	DMERR("bootstrap doesn't support extend");

	return -EINVAL;
}
static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks;

	return 0;
}
static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*count = smm->ll.nr_blocks - smm->begin;

	return 0;
}
static int sm_bootstrap_get_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t *result)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	*result = (b < smm->begin) ? 1 : 0;

	return 0;
}
static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm,
					       dm_block_t b, int *result)
{
	*result = 0;

	return 0;
}

static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b,
				  uint32_t count)
{
	DMERR("bootstrap doesn't support set_count");

	return -EINVAL;
}

static int sm_bootstrap_new_block(struct dm_space_map *sm, dm_block_t *b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	/*
	 * We know the entire device is unused.
	 */
	if (smm->begin == smm->ll.nr_blocks)
		return -ENOSPC;

	*b = smm->begin++;

	return 0;
}

static int sm_bootstrap_inc_block(struct dm_space_map *sm, dm_block_t b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	return add_bop(smm, BOP_INC, b);
}

static int sm_bootstrap_dec_block(struct dm_space_map *sm, dm_block_t b)
{
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	return add_bop(smm, BOP_DEC, b);
}

static int sm_bootstrap_commit(struct dm_space_map *sm)
{
	return 0;
}

static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result)
{
	DMERR("bootstrap doesn't support root_size");

	return -EINVAL;
}

static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where,
				  size_t max)
{
	DMERR("bootstrap doesn't support copy_root");

	return -EINVAL;
}

static struct dm_space_map bootstrap_ops = {
	.destroy = sm_bootstrap_destroy,
	.extend = sm_bootstrap_extend,
	.get_nr_blocks = sm_bootstrap_get_nr_blocks,
	.get_nr_free = sm_bootstrap_get_nr_free,
	.get_count = sm_bootstrap_get_count,
	.count_is_more_than_one = sm_bootstrap_count_is_more_than_one,
	.set_count = sm_bootstrap_set_count,
	.inc_block = sm_bootstrap_inc_block,
	.dec_block = sm_bootstrap_dec_block,
	.new_block = sm_bootstrap_new_block,
	.commit = sm_bootstrap_commit,
	.root_size = sm_bootstrap_root_size,
	.copy_root = sm_bootstrap_copy_root,
	.register_threshold_callback = NULL
};
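/*
 * The bootstrap ops are copied over smm->sm while the space map's own
 * data structures are being built (see dm_sm_metadata_create() and
 * sm_metadata_extend()).  In that mode new blocks are handed out
 * sequentially from smm->begin, and reference count changes are queued as
 * bops in smm->uncommitted rather than applied immediately.
 */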
/*----------------------------------------------------------------*/

static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
{
	int r, i;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
	dm_block_t old_len = smm->ll.nr_blocks;

	/*
	 * Flick into a mode where all blocks get allocated in the new area.
	 */
	smm->begin = old_len;
	memcpy(sm, &bootstrap_ops, sizeof(*sm));

	/*
	 * Extend.
	 */
	r = sm_ll_extend(&smm->ll, extra_blocks);
	if (r)
		goto out;

	/*
	 * We repeatedly increment then commit until the commit doesn't
	 * allocate any new blocks.
	 */
	do {
		for (i = old_len; !r && i < smm->begin; i++) {
			r = sm_ll_inc(&smm->ll, i, &ev);
			if (r)
				goto out;
		}
		old_len = smm->begin;

		r = sm_ll_commit(&smm->ll);
		if (r)
			goto out;

	} while (old_len != smm->begin);

out:
	/*
	 * Switch back to normal behaviour.
	 */
	memcpy(sm, &ops, sizeof(*sm));

	return r;
}
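/*
 * The loop above converges because incrementing and committing may
 * themselves allocate blocks from the newly added area (the bootstrap ops
 * hand them out from smm->begin); each pass increments the blocks
 * allocated by the previous pass until a pass allocates nothing more and
 * old_len catches up with smm->begin.
 */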
/*----------------------------------------------------------------*/

struct dm_space_map *dm_sm_metadata_init(void)
{
	struct sm_metadata *smm;

	smm = kmalloc(sizeof(*smm), GFP_KERNEL);
	if (!smm)
		return ERR_PTR(-ENOMEM);

	memcpy(&smm->sm, &ops, sizeof(smm->sm));

	return &smm->sm;
}

int dm_sm_metadata_create(struct dm_space_map *sm,
			  struct dm_transaction_manager *tm,
			  dm_block_t nr_blocks,
			  dm_block_t superblock)
{
	int r;
	dm_block_t i;
	enum allocation_event ev;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	smm->begin = superblock + 1;
	smm->recursion_count = 0;
	smm->allocated_this_transaction = 0;
	brb_init(&smm->uncommitted);
	threshold_init(&smm->threshold);

	memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));

	r = sm_ll_new_metadata(&smm->ll, tm);
	if (r)
		return r;

	if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
		nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
	r = sm_ll_extend(&smm->ll, nr_blocks);
	if (r)
		return r;

	memcpy(&smm->sm, &ops, sizeof(smm->sm));

	/*
	 * Now we need to update the newly created data structures with the
	 * allocated blocks that they were built from.
	 */
	for (i = superblock; !r && i < smm->begin; i++)
		r = sm_ll_inc(&smm->ll, i, &ev);

	if (r)
		return r;

	return sm_metadata_commit(sm);
}
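/*
 * The blocks incremented at the end of dm_sm_metadata_create() are the
 * superblock plus every block the bootstrap allocator handed out (i.e.
 * superblock .. smm->begin - 1) while sm_ll_new_metadata() and
 * sm_ll_extend() were building the on-disk structures.
 */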
int dm_sm_metadata_open(struct dm_space_map *sm,
			struct dm_transaction_manager *tm,
			void *root_le, size_t len)
{
	int r;
	struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);

	r = sm_ll_open_metadata(&smm->ll, tm, root_le, len);
	if (r)
		return r;

	smm->begin = 0;
	smm->recursion_count = 0;
	smm->allocated_this_transaction = 0;
	brb_init(&smm->uncommitted);
	threshold_init(&smm->threshold);

	memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));

	return 0;
}