dm-array.c

/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#include "dm-array.h"
#include "dm-space-map.h"
#include "dm-transaction-manager.h"

#include <linux/export.h>
#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "array"
/*----------------------------------------------------------------*/

/*
 * The array is implemented as a fully populated btree, which points to
 * blocks that contain the packed values.  This is more space efficient
 * than just using a btree since we don't store 1 key per value.
 */
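/*
 * Illustrative sketch of the resulting layout (derived from the code
 * below, not a separate format definition):
 *
 *      btree:  array block index -> ablock location (__le64)
 *      ablock: [ csum | max_entries | nr_entries | value_size | blocknr ]
 *              followed by up to max_entries packed values.
 */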
struct array_block {
        __le32 csum;
        __le32 max_entries;
        __le32 nr_entries;
        __le32 value_size;
        __le64 blocknr; /* Block this node is supposed to live in. */
} __packed;

/*----------------------------------------------------------------*/

/*
 * Validator methods.  As usual we calculate a checksum, and also write the
 * block location into the header (paranoia about ssds remapping areas by
 * mistake).
 */
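/*
 * Note that the checksum is calculated from 'max_entries' to the end of
 * the block, i.e. it covers everything except the csum field itself;
 * hence the 'size_of_block - sizeof(__le32)' length used below.
 */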
#define CSUM_XOR 595846735

static void array_block_prepare_for_write(struct dm_block_validator *v,
                                          struct dm_block *b,
                                          size_t size_of_block)
{
        struct array_block *bh_le = dm_block_data(b);

        bh_le->blocknr = cpu_to_le64(dm_block_location(b));
        bh_le->csum = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
                                                 size_of_block - sizeof(__le32),
                                                 CSUM_XOR));
}

static int array_block_check(struct dm_block_validator *v,
                             struct dm_block *b,
                             size_t size_of_block)
{
        struct array_block *bh_le = dm_block_data(b);
        __le32 csum_disk;

        if (dm_block_location(b) != le64_to_cpu(bh_le->blocknr)) {
                DMERR_LIMIT("array_block_check failed: blocknr %llu != wanted %llu",
                            (unsigned long long) le64_to_cpu(bh_le->blocknr),
                            (unsigned long long) dm_block_location(b));
                return -ENOTBLK;
        }

        csum_disk = cpu_to_le32(dm_bm_checksum(&bh_le->max_entries,
                                               size_of_block - sizeof(__le32),
                                               CSUM_XOR));
        if (csum_disk != bh_le->csum) {
                DMERR_LIMIT("array_block_check failed: csum %u != wanted %u",
                            (unsigned) le32_to_cpu(csum_disk),
                            (unsigned) le32_to_cpu(bh_le->csum));
                return -EILSEQ;
        }

        return 0;
}

static struct dm_block_validator array_validator = {
        .name = "array",
        .prepare_for_write = array_block_prepare_for_write,
        .check = array_block_check
};
/*----------------------------------------------------------------*/

/*
 * Functions for manipulating the array blocks.
 */

/*
 * Returns a pointer to a value within an array block.
 *
 * index - The index into _this_ specific block.
 */
static void *element_at(struct dm_array_info *info, struct array_block *ab,
                        unsigned index)
{
        unsigned char *entry = (unsigned char *) (ab + 1);

        entry += index * info->value_type.size;

        return entry;
}

/*
 * Utility function that calls one of the value_type methods on every value
 * in an array block.
 */
static void on_entries(struct dm_array_info *info, struct array_block *ab,
                       void (*fn)(void *, const void *))
{
        unsigned i, nr_entries = le32_to_cpu(ab->nr_entries);

        for (i = 0; i < nr_entries; i++)
                fn(info->value_type.context, element_at(info, ab, i));
}
/*
 * Increment every value in an array block.
 */
static void inc_ablock_entries(struct dm_array_info *info, struct array_block *ab)
{
        struct dm_btree_value_type *vt = &info->value_type;

        if (vt->inc)
                on_entries(info, ab, vt->inc);
}

/*
 * Decrement every value in an array block.
 */
static void dec_ablock_entries(struct dm_array_info *info, struct array_block *ab)
{
        struct dm_btree_value_type *vt = &info->value_type;

        if (vt->dec)
                on_entries(info, ab, vt->dec);
}

/*
 * Each array block can hold this many values.
 */
static uint32_t calc_max_entries(size_t value_size, size_t size_of_block)
{
        return (size_of_block - sizeof(struct array_block)) / value_size;
}
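/*
 * For example (illustrative numbers only): with 4096 byte metadata
 * blocks and 8 byte values, sizeof(struct array_block) is 24, so an
 * array block holds (4096 - 24) / 8 = 509 values.
 */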
/*
 * Allocate a new array block.  The caller will need to unlock block.
 */
static int alloc_ablock(struct dm_array_info *info, size_t size_of_block,
                        uint32_t max_entries,
                        struct dm_block **block, struct array_block **ab)
{
        int r;

        r = dm_tm_new_block(info->btree_info.tm, &array_validator, block);
        if (r)
                return r;

        (*ab) = dm_block_data(*block);
        (*ab)->max_entries = cpu_to_le32(max_entries);
        (*ab)->nr_entries = cpu_to_le32(0);
        (*ab)->value_size = cpu_to_le32(info->value_type.size);

        return 0;
}
/*
 * Pad an array block out with a particular value.  Every instance of the
 * value copied in is incremented via the value_type's inc method.  new_nr
 * must always be more than the current number of entries.
 */
static void fill_ablock(struct dm_array_info *info, struct array_block *ab,
                        const void *value, unsigned new_nr)
{
        unsigned i;
        uint32_t nr_entries;
        struct dm_btree_value_type *vt = &info->value_type;

        BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
        BUG_ON(new_nr < le32_to_cpu(ab->nr_entries));

        nr_entries = le32_to_cpu(ab->nr_entries);
        for (i = nr_entries; i < new_nr; i++) {
                if (vt->inc)
                        vt->inc(vt->context, value);
                memcpy(element_at(info, ab, i), value, vt->size);
        }
        ab->nr_entries = cpu_to_le32(new_nr);
}
/*
 * Remove some entries from the back of an array block.  Every value
 * removed will be decremented.  new_nr must be <= the current number of
 * entries.
 */
static void trim_ablock(struct dm_array_info *info, struct array_block *ab,
                        unsigned new_nr)
{
        unsigned i;
        uint32_t nr_entries;
        struct dm_btree_value_type *vt = &info->value_type;

        BUG_ON(new_nr > le32_to_cpu(ab->max_entries));
        BUG_ON(new_nr > le32_to_cpu(ab->nr_entries));

        nr_entries = le32_to_cpu(ab->nr_entries);
        for (i = nr_entries; i > new_nr; i--)
                if (vt->dec)
                        vt->dec(vt->context, element_at(info, ab, i - 1));
        ab->nr_entries = cpu_to_le32(new_nr);
}
/*
 * Read locks a block, and coerces it to an array block.  The caller must
 * unlock 'block' when finished.
 */
static int get_ablock(struct dm_array_info *info, dm_block_t b,
                      struct dm_block **block, struct array_block **ab)
{
        int r;

        r = dm_tm_read_lock(info->btree_info.tm, b, &array_validator, block);
        if (r)
                return r;

        *ab = dm_block_data(*block);
        return 0;
}

/*
 * Unlocks an array block.
 */
static int unlock_ablock(struct dm_array_info *info, struct dm_block *block)
{
        return dm_tm_unlock(info->btree_info.tm, block);
}
/*----------------------------------------------------------------*/

/*
 * Btree manipulation.
 */

/*
 * Looks up an array block in the btree, and then read locks it.
 *
 * index is the index of the array_block (ie. the array index /
 * max_entries).
 */
static int lookup_ablock(struct dm_array_info *info, dm_block_t root,
                         unsigned index, struct dm_block **block,
                         struct array_block **ab)
{
        int r;
        uint64_t key = index;
        __le64 block_le;

        r = dm_btree_lookup(&info->btree_info, root, &key, &block_le);
        if (r)
                return r;

        return get_ablock(info, le64_to_cpu(block_le), block, ab);
}
/*
 * Insert an array block into the btree.  The block is _not_ unlocked.
 */
static int insert_ablock(struct dm_array_info *info, uint64_t index,
                         struct dm_block *block, dm_block_t *root)
{
        __le64 block_le = cpu_to_le64(dm_block_location(block));

        __dm_bless_for_disk(block_le);
        return dm_btree_insert(&info->btree_info, *root, &index, &block_le, root);
}

/*
 * Looks up an array block in the btree.  Then shadows it, and updates the
 * btree to point to this new shadow.  'root' is an input/output parameter
 * for both the current root block, and the new one.
 */
static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
                         unsigned index, struct dm_block **block,
                         struct array_block **ab)
{
        int r, inc;
        uint64_t key = index;
        dm_block_t b;
        __le64 block_le;

        /*
         * lookup
         */
        r = dm_btree_lookup(&info->btree_info, *root, &key, &block_le);
        if (r)
                return r;
        b = le64_to_cpu(block_le);

        /*
         * shadow
         */
        r = dm_tm_shadow_block(info->btree_info.tm, b,
                               &array_validator, block, &inc);
        if (r)
                return r;

        *ab = dm_block_data(*block);
        if (inc)
                inc_ablock_entries(info, *ab);

        /*
         * Reinsert.
         *
         * The shadow op will often be a noop.  Only insert if it really
         * copied data.
         */
        if (dm_block_location(*block) != b)
                r = insert_ablock(info, index, *block, root);

        return r;
}
/*
 * Allocate a new array block, and fill it with some values.
 */
static int insert_new_ablock(struct dm_array_info *info, size_t size_of_block,
                             uint32_t max_entries,
                             unsigned block_index, uint32_t nr,
                             const void *value, dm_block_t *root)
{
        int r;
        struct dm_block *block;
        struct array_block *ab;

        r = alloc_ablock(info, size_of_block, max_entries, &block, &ab);
        if (r)
                return r;

        fill_ablock(info, ab, value, nr);
        r = insert_ablock(info, block_index, block, root);
        unlock_ablock(info, block);

        return r;
}

static int insert_full_ablocks(struct dm_array_info *info, size_t size_of_block,
                               unsigned begin_block, unsigned end_block,
                               unsigned max_entries, const void *value,
                               dm_block_t *root)
{
        int r = 0;

        for (; !r && begin_block != end_block; begin_block++)
                r = insert_new_ablock(info, size_of_block, max_entries, begin_block, max_entries, value, root);

        return r;
}
/*
 * There are a bunch of functions involved with resizing an array.  This
 * structure holds information that is commonly needed by them.  Purely here
 * to reduce parameter count.
 */
struct resize {
        /*
         * Describes the array.
         */
        struct dm_array_info *info;

        /*
         * The current root of the array.  This gets updated.
         */
        dm_block_t root;

        /*
         * Metadata block size.  Used to calculate the nr entries in an
         * array block.
         */
        size_t size_of_block;

        /*
         * Maximum nr entries in an array block.
         */
        unsigned max_entries;

        /*
         * nr of completely full blocks in the array.
         *
         * 'old' refers to before the resize, 'new' after.
         */
        unsigned old_nr_full_blocks, new_nr_full_blocks;

        /*
         * Number of entries in the final block.  0 iff only full blocks in
         * the array.
         */
        unsigned old_nr_entries_in_last_block, new_nr_entries_in_last_block;

        /*
         * The default value used when growing the array.
         */
        const void *value;
};
/*
 * Removes a consecutive set of array blocks from the btree.  The values
 * in the blocks are decremented as a side effect of the btree remove.
 *
 * begin_index - the index of the first array block to remove.
 * end_index - the one-past-the-end value.  ie. this block is not removed.
 */
static int drop_blocks(struct resize *resize, unsigned begin_index,
                       unsigned end_index)
{
        int r;

        while (begin_index != end_index) {
                uint64_t key = begin_index++;
                r = dm_btree_remove(&resize->info->btree_info, resize->root,
                                    &key, &resize->root);
                if (r)
                        return r;
        }

        return 0;
}
/*
 * Calculates how many blocks are needed for the array.
 */
static unsigned total_nr_blocks_needed(unsigned nr_full_blocks,
                                       unsigned nr_entries_in_last_block)
{
        return nr_full_blocks + (nr_entries_in_last_block ? 1 : 0);
}

/*
 * Shrink an array.
 */
static int shrink(struct resize *resize)
{
        int r;
        unsigned begin, end;
        struct dm_block *block;
        struct array_block *ab;

        /*
         * Lose some blocks from the back?
         */
        if (resize->new_nr_full_blocks < resize->old_nr_full_blocks) {
                begin = total_nr_blocks_needed(resize->new_nr_full_blocks,
                                               resize->new_nr_entries_in_last_block);
                end = total_nr_blocks_needed(resize->old_nr_full_blocks,
                                             resize->old_nr_entries_in_last_block);

                r = drop_blocks(resize, begin, end);
                if (r)
                        return r;
        }

        /*
         * Trim the new tail block
         */
        if (resize->new_nr_entries_in_last_block) {
                r = shadow_ablock(resize->info, &resize->root,
                                  resize->new_nr_full_blocks, &block, &ab);
                if (r)
                        return r;

                trim_ablock(resize->info, ab, resize->new_nr_entries_in_last_block);
                unlock_ablock(resize->info, block);
        }

        return 0;
}
/*
 * Grow an array.
 */
static int grow_extend_tail_block(struct resize *resize, uint32_t new_nr_entries)
{
        int r;
        struct dm_block *block;
        struct array_block *ab;

        r = shadow_ablock(resize->info, &resize->root,
                          resize->old_nr_full_blocks, &block, &ab);
        if (r)
                return r;

        fill_ablock(resize->info, ab, resize->value, new_nr_entries);
        unlock_ablock(resize->info, block);

        return r;
}

static int grow_add_tail_block(struct resize *resize)
{
        return insert_new_ablock(resize->info, resize->size_of_block,
                                 resize->max_entries,
                                 resize->new_nr_full_blocks,
                                 resize->new_nr_entries_in_last_block,
                                 resize->value, &resize->root);
}

static int grow_needs_more_blocks(struct resize *resize)
{
        int r;
        unsigned old_nr_blocks = resize->old_nr_full_blocks;

        if (resize->old_nr_entries_in_last_block > 0) {
                old_nr_blocks++;

                r = grow_extend_tail_block(resize, resize->max_entries);
                if (r)
                        return r;
        }

        r = insert_full_ablocks(resize->info, resize->size_of_block,
                                old_nr_blocks,
                                resize->new_nr_full_blocks,
                                resize->max_entries, resize->value,
                                &resize->root);
        if (r)
                return r;

        if (resize->new_nr_entries_in_last_block)
                r = grow_add_tail_block(resize);

        return r;
}

static int grow(struct resize *resize)
{
        if (resize->new_nr_full_blocks > resize->old_nr_full_blocks)
                return grow_needs_more_blocks(resize);

        else if (resize->old_nr_entries_in_last_block)
                return grow_extend_tail_block(resize, resize->new_nr_entries_in_last_block);

        else
                return grow_add_tail_block(resize);
}
/*----------------------------------------------------------------*/

/*
 * These are the value_type functions for the btree elements, which point
 * to array blocks.
 */
static void block_inc(void *context, const void *value)
{
        __le64 block_le;
        struct dm_array_info *info = context;

        memcpy(&block_le, value, sizeof(block_le));
        dm_tm_inc(info->btree_info.tm, le64_to_cpu(block_le));
}

static void block_dec(void *context, const void *value)
{
        int r;
        uint64_t b;
        __le64 block_le;
        uint32_t ref_count;
        struct dm_block *block;
        struct array_block *ab;
        struct dm_array_info *info = context;

        memcpy(&block_le, value, sizeof(block_le));
        b = le64_to_cpu(block_le);

        r = dm_tm_ref(info->btree_info.tm, b, &ref_count);
        if (r) {
                DMERR_LIMIT("couldn't get reference count for block %llu",
                            (unsigned long long) b);
                return;
        }

        if (ref_count == 1) {
                /*
                 * We're about to drop the last reference to this ablock.
                 * So we need to decrement the ref count of the contents.
                 */
                r = get_ablock(info, b, &block, &ab);
                if (r) {
                        DMERR_LIMIT("couldn't get array block %llu",
                                    (unsigned long long) b);
                        return;
                }

                dec_ablock_entries(info, ab);
                unlock_ablock(info, block);
        }

        dm_tm_dec(info->btree_info.tm, b);
}

static int block_equal(void *context, const void *value1, const void *value2)
{
        return !memcmp(value1, value2, sizeof(__le64));
}
/*----------------------------------------------------------------*/

void dm_array_info_init(struct dm_array_info *info,
                        struct dm_transaction_manager *tm,
                        struct dm_btree_value_type *vt)
{
        struct dm_btree_value_type *bvt = &info->btree_info.value_type;

        memcpy(&info->value_type, vt, sizeof(info->value_type));
        info->btree_info.tm = tm;
        info->btree_info.levels = 1;

        bvt->context = info;
        bvt->size = sizeof(__le64);
        bvt->inc = block_inc;
        bvt->dec = block_dec;
        bvt->equal = block_equal;
}
EXPORT_SYMBOL_GPL(dm_array_info_init);

int dm_array_empty(struct dm_array_info *info, dm_block_t *root)
{
        return dm_btree_empty(&info->btree_info, root);
}
EXPORT_SYMBOL_GPL(dm_array_empty);
static int array_resize(struct dm_array_info *info, dm_block_t root,
                        uint32_t old_size, uint32_t new_size,
                        const void *value, dm_block_t *new_root)
{
        int r;
        struct resize resize;

        if (old_size == new_size)
                return 0;

        resize.info = info;
        resize.root = root;
        resize.size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
        resize.max_entries = calc_max_entries(info->value_type.size,
                                              resize.size_of_block);

        resize.old_nr_full_blocks = old_size / resize.max_entries;
        resize.old_nr_entries_in_last_block = old_size % resize.max_entries;
        resize.new_nr_full_blocks = new_size / resize.max_entries;
        resize.new_nr_entries_in_last_block = new_size % resize.max_entries;
        resize.value = value;

        r = ((new_size > old_size) ? grow : shrink)(&resize);
        if (r)
                return r;

        *new_root = resize.root;
        return 0;
}
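/*
 * Worked example (illustrative numbers only): with max_entries = 509,
 * growing from old_size = 1000 to new_size = 1800 gives
 * old_nr_full_blocks = 1, old_nr_entries_in_last_block = 491,
 * new_nr_full_blocks = 3 and new_nr_entries_in_last_block = 273.
 * grow() therefore takes the grow_needs_more_blocks() path: the old
 * tail block (index 1) is padded out to 509 entries, one new full
 * block is inserted at index 2, and a tail block with 273 entries is
 * added at index 3.
 */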
int dm_array_resize(struct dm_array_info *info, dm_block_t root,
                    uint32_t old_size, uint32_t new_size,
                    const void *value, dm_block_t *new_root)
                    __dm_written_to_disk(value)
{
        int r = array_resize(info, root, old_size, new_size, value, new_root);
        __dm_unbless_for_disk(value);
        return r;
}
EXPORT_SYMBOL_GPL(dm_array_resize);

int dm_array_del(struct dm_array_info *info, dm_block_t root)
{
        return dm_btree_del(&info->btree_info, root);
}
EXPORT_SYMBOL_GPL(dm_array_del);
int dm_array_get_value(struct dm_array_info *info, dm_block_t root,
                       uint32_t index, void *value_le)
{
        int r;
        struct dm_block *block;
        struct array_block *ab;
        size_t size_of_block;
        unsigned entry, max_entries;

        size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
        max_entries = calc_max_entries(info->value_type.size, size_of_block);

        r = lookup_ablock(info, root, index / max_entries, &block, &ab);
        if (r)
                return r;

        entry = index % max_entries;
        if (entry >= le32_to_cpu(ab->nr_entries))
                r = -ENODATA;
        else
                memcpy(value_le, element_at(info, ab, entry),
                       info->value_type.size);

        unlock_ablock(info, block);
        return r;
}
EXPORT_SYMBOL_GPL(dm_array_get_value);
static int array_set_value(struct dm_array_info *info, dm_block_t root,
                           uint32_t index, const void *value, dm_block_t *new_root)
{
        int r;
        struct dm_block *block;
        struct array_block *ab;
        size_t size_of_block;
        unsigned max_entries;
        unsigned entry;
        void *old_value;
        struct dm_btree_value_type *vt = &info->value_type;

        size_of_block = dm_bm_block_size(dm_tm_get_bm(info->btree_info.tm));
        max_entries = calc_max_entries(info->value_type.size, size_of_block);

        r = shadow_ablock(info, &root, index / max_entries, &block, &ab);
        if (r)
                return r;
        *new_root = root;

        entry = index % max_entries;
        if (entry >= le32_to_cpu(ab->nr_entries)) {
                r = -ENODATA;
                goto out;
        }

        old_value = element_at(info, ab, entry);
        if (vt->dec &&
            (!vt->equal || !vt->equal(vt->context, old_value, value))) {
                vt->dec(vt->context, old_value);
                if (vt->inc)
                        vt->inc(vt->context, value);
        }

        memcpy(old_value, value, info->value_type.size);

out:
        unlock_ablock(info, block);
        return r;
}

int dm_array_set_value(struct dm_array_info *info, dm_block_t root,
                       uint32_t index, const void *value, dm_block_t *new_root)
                       __dm_written_to_disk(value)
{
        int r;

        r = array_set_value(info, root, index, value, new_root);
        __dm_unbless_for_disk(value);
        return r;
}
EXPORT_SYMBOL_GPL(dm_array_set_value);
struct walk_info {
        struct dm_array_info *info;
        int (*fn)(void *context, uint64_t key, void *leaf);
        void *context;
};

static int walk_ablock(void *context, uint64_t *keys, void *leaf)
{
        struct walk_info *wi = context;

        int r;
        unsigned i;
        __le64 block_le;
        unsigned nr_entries, max_entries;
        struct dm_block *block;
        struct array_block *ab;

        memcpy(&block_le, leaf, sizeof(block_le));
        r = get_ablock(wi->info, le64_to_cpu(block_le), &block, &ab);
        if (r)
                return r;

        max_entries = le32_to_cpu(ab->max_entries);
        nr_entries = le32_to_cpu(ab->nr_entries);
        for (i = 0; i < nr_entries; i++) {
                r = wi->fn(wi->context, keys[0] * max_entries + i,
                           element_at(wi->info, ab, i));

                if (r)
                        break;
        }

        unlock_ablock(wi->info, block);
        return r;
}

int dm_array_walk(struct dm_array_info *info, dm_block_t root,
                  int (*fn)(void *, uint64_t key, void *leaf),
                  void *context)
{
        struct walk_info wi;

        wi.info = info;
        wi.fn = fn;
        wi.context = context;

        return dm_btree_walk(&info->btree_info, root, walk_ablock, &wi);
}
EXPORT_SYMBOL_GPL(dm_array_walk);

/*----------------------------------------------------------------*/
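/*
 * Editor's usage sketch (not part of the original file): a minimal
 * example of driving the public API above from a hypothetical client.
 * 'example_array_usage' and its error handling are invented for
 * illustration; 'tm' is assumed to come from the client's metadata
 * setup (see dm-transaction-manager.h).  The value type here is a plain
 * __le64 with no inc/dec/equal methods.
 */
static int __maybe_unused example_array_usage(struct dm_transaction_manager *tm)
{
        int r;
        dm_block_t root;
        struct dm_array_info info;
        struct dm_btree_value_type vt = {
                .context = NULL,
                .size = sizeof(__le64),
                .inc = NULL,
                .dec = NULL,
                .equal = NULL
        };
        __le64 default_le = cpu_to_le64(0), v_le;

        dm_array_info_init(&info, tm, &vt);

        /* Create an empty array, then grow it to 100 entries, all zero. */
        r = dm_array_empty(&info, &root);
        if (r)
                return r;

        __dm_bless_for_disk(&default_le);
        r = dm_array_resize(&info, root, 0, 100, &default_le, &root);
        if (r)
                return r;

        /* Overwrite entry 42, then read it back. */
        v_le = cpu_to_le64(12345);
        __dm_bless_for_disk(&v_le);
        r = dm_array_set_value(&info, root, 42, &v_le, &root);
        if (r)
                return r;

        r = dm_array_get_value(&info, root, 42, &v_le);
        if (r)
                return r;

        /* Drop the whole array, releasing its blocks. */
        return dm_array_del(&info, root);
}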