/* dm-log.c */
  1. /*
  2. * Copyright (C) 2003 Sistina Software
  3. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
  4. *
  5. * This file is released under the LGPL.
  6. */
  7. #include <linux/init.h>
  8. #include <linux/slab.h>
  9. #include <linux/module.h>
  10. #include <linux/vmalloc.h>
  11. #include <linux/dm-io.h>
  12. #include <linux/dm-dirty-log.h>
  13. #include <linux/device-mapper.h>
  14. #define DM_MSG_PREFIX "dirty region log"
/*
 * Internal bookkeeping for a registered dirty-log type: pairs the
 * type's ops table with its node on the global _log_types list.
 */
struct dm_dirty_log_internal {
	struct dm_dirty_log_type *type;

	struct list_head list;
};

/* All registered log types; lookups and updates are serialized by _lock. */
static LIST_HEAD(_log_types);
static DEFINE_SPINLOCK(_lock);
  21. static struct dm_dirty_log_internal *__find_dirty_log_type(const char *name)
  22. {
  23. struct dm_dirty_log_internal *log_type;
  24. list_for_each_entry(log_type, &_log_types, list)
  25. if (!strcmp(name, log_type->type->name))
  26. return log_type;
  27. return NULL;
  28. }
  29. static struct dm_dirty_log_internal *_get_dirty_log_type(const char *name)
  30. {
  31. struct dm_dirty_log_internal *log_type;
  32. spin_lock(&_lock);
  33. log_type = __find_dirty_log_type(name);
  34. if (log_type && !try_module_get(log_type->type->module))
  35. log_type = NULL;
  36. spin_unlock(&_lock);
  37. return log_type;
  38. }
/*
 * get_type
 * @type_name
 *
 * Attempt to retrieve the dm_dirty_log_type by name. If not already
 * available, attempt to load the appropriate module.
 *
 * Log modules are named "dm-log-" followed by the 'type_name'.
 * Modules may contain multiple types.
 * This function will first try the module "dm-log-<type_name>",
 * then truncate 'type_name' on the last '-' and try again.
 *
 * For example, if type_name was "clustered-disk", it would search
 * 'dm-log-clustered-disk' then 'dm-log-clustered'.
 *
 * Returns: dirty_log_type* on success, NULL on failure
 */
static struct dm_dirty_log_type *get_type(const char *type_name)
{
	char *p, *type_name_dup;
	struct dm_dirty_log_internal *log_type;

	if (!type_name)
		return NULL;

	/* Fast path: the type is already registered. */
	log_type = _get_dirty_log_type(type_name);
	if (log_type)
		return log_type->type;

	/* Work on a copy so it can be truncated at '-' boundaries. */
	type_name_dup = kstrdup(type_name, GFP_KERNEL);
	if (!type_name_dup) {
		DMWARN("No memory left to attempt log module load for \"%s\"",
		       type_name);
		return NULL;
	}

	/*
	 * Request "dm-log-<name>", then retry with the last '-'-separated
	 * component stripped, until the *full* type name resolves or no
	 * separators remain.  Note the lookup always uses the original
	 * type_name; only the module name is shortened.
	 */
	while (request_module("dm-log-%s", type_name_dup) ||
	       !(log_type = _get_dirty_log_type(type_name))) {
		p = strrchr(type_name_dup, '-');
		if (!p)
			break;
		p[0] = '\0';
	}

	if (!log_type)
		DMWARN("Module for logging type \"%s\" not found.", type_name);

	kfree(type_name_dup);

	return log_type ? log_type->type : NULL;
}
  83. static void put_type(struct dm_dirty_log_type *type)
  84. {
  85. struct dm_dirty_log_internal *log_type;
  86. if (!type)
  87. return;
  88. spin_lock(&_lock);
  89. log_type = __find_dirty_log_type(type->name);
  90. if (!log_type)
  91. goto out;
  92. module_put(type->module);
  93. out:
  94. spin_unlock(&_lock);
  95. }
  96. static struct dm_dirty_log_internal *_alloc_dirty_log_type(struct dm_dirty_log_type *type)
  97. {
  98. struct dm_dirty_log_internal *log_type = kzalloc(sizeof(*log_type),
  99. GFP_KERNEL);
  100. if (log_type)
  101. log_type->type = type;
  102. return log_type;
  103. }
  104. int dm_dirty_log_type_register(struct dm_dirty_log_type *type)
  105. {
  106. struct dm_dirty_log_internal *log_type = _alloc_dirty_log_type(type);
  107. int r = 0;
  108. if (!log_type)
  109. return -ENOMEM;
  110. spin_lock(&_lock);
  111. if (!__find_dirty_log_type(type->name))
  112. list_add(&log_type->list, &_log_types);
  113. else {
  114. kfree(log_type);
  115. r = -EEXIST;
  116. }
  117. spin_unlock(&_lock);
  118. return r;
  119. }
  120. EXPORT_SYMBOL(dm_dirty_log_type_register);
  121. int dm_dirty_log_type_unregister(struct dm_dirty_log_type *type)
  122. {
  123. struct dm_dirty_log_internal *log_type;
  124. spin_lock(&_lock);
  125. log_type = __find_dirty_log_type(type->name);
  126. if (!log_type) {
  127. spin_unlock(&_lock);
  128. return -EINVAL;
  129. }
  130. list_del(&log_type->list);
  131. spin_unlock(&_lock);
  132. kfree(log_type);
  133. return 0;
  134. }
  135. EXPORT_SYMBOL(dm_dirty_log_type_unregister);
  136. struct dm_dirty_log *dm_dirty_log_create(const char *type_name,
  137. struct dm_target *ti,
  138. unsigned int argc, char **argv)
  139. {
  140. struct dm_dirty_log_type *type;
  141. struct dm_dirty_log *log;
  142. log = kmalloc(sizeof(*log), GFP_KERNEL);
  143. if (!log)
  144. return NULL;
  145. type = get_type(type_name);
  146. if (!type) {
  147. kfree(log);
  148. return NULL;
  149. }
  150. log->type = type;
  151. if (type->ctr(log, ti, argc, argv)) {
  152. kfree(log);
  153. put_type(type);
  154. return NULL;
  155. }
  156. return log;
  157. }
  158. EXPORT_SYMBOL(dm_dirty_log_create);
/*
 * Destroy a log created by dm_dirty_log_create(): run the type's
 * destructor, drop the module reference, then free the handle.
 */
void dm_dirty_log_destroy(struct dm_dirty_log *log)
{
	log->type->dtr(log);
	put_type(log->type);
	kfree(log);
}
EXPORT_SYMBOL(dm_dirty_log_destroy);
/*-----------------------------------------------------------------
 * Persistent and core logs share a lot of their implementation.
 * FIXME: need a reload method to be called from a resume
 *---------------------------------------------------------------*/

/*
 * Magic for persistent mirrors: "MiRr"
 */
#define MIRROR_MAGIC 0x4D695272

/*
 * The on-disk version of the metadata.
 */
#define MIRROR_DISK_VERSION 2

/* Sectors reserved at the start of the log area for the header. */
#define LOG_OFFSET 2

/*
 * Log header; stored little-endian on disk (see header_to_disk()
 * and header_from_disk()).
 */
struct log_header {
	uint32_t magic;

	/*
	 * Simple, incrementing version. no backward
	 * compatibility.
	 */
	uint32_t version;
	sector_t nr_regions;
};
/*
 * Per-log context shared by the "core" and "disk" log types.
 */
struct log_c {
	struct dm_target *ti;
	int touched;			/* a bitset changed since last flush */
	uint32_t region_size;		/* region size in sectors */
	unsigned int region_count;
	region_t sync_count;		/* number of regions counted in sync */

	unsigned bitset_uint32_count;	/* uint32_t words per bitset */
	uint32_t *clean_bits;		/* set bit = region has no pending writes */
	uint32_t *sync_bits;		/* set bit = region is in sync */
	uint32_t *recovering_bits;	/* FIXME: this seems excessive */

	int sync_search;		/* resume point for resync-work scans */

	/* Resync flag */
	enum sync {
		DEFAULTSYNC,	/* Synchronize if necessary */
		NOSYNC,		/* Devices known to be already in sync */
		FORCESYNC,	/* Force a sync to happen */
	} sync;

	struct dm_io_request io_req;

	/*
	 * Disk log fields
	 */
	int log_dev_failed;
	struct dm_dev *log_dev;
	struct log_header header;
	struct dm_io_region header_location;
	struct log_header *disk_header;	/* buffer holding header + clean bitset */
};
/*
 * The touched member needs to be updated every time we access
 * one of the bitsets.
 */
static inline int log_test_bit(uint32_t *bs, unsigned bit)
{
	/* Normalize to exactly 0 or 1 whatever ext2_test_bit returns. */
	return ext2_test_bit(bit, (unsigned long *) bs) ? 1 : 0;
}

static inline void log_set_bit(struct log_c *l,
			       uint32_t *bs, unsigned bit)
{
	ext2_set_bit(bit, (unsigned long *) bs);
	l->touched = 1;	/* log must be written out on next flush */
}

static inline void log_clear_bit(struct log_c *l,
				 uint32_t *bs, unsigned bit)
{
	ext2_clear_bit(bit, (unsigned long *) bs);
	l->touched = 1;	/* log must be written out on next flush */
}
/*----------------------------------------------------------------
 * Header IO
 *--------------------------------------------------------------*/

/* Convert the in-core header to its little-endian on-disk form. */
static void header_to_disk(struct log_header *core, struct log_header *disk)
{
	disk->magic = cpu_to_le32(core->magic);
	disk->version = cpu_to_le32(core->version);
	disk->nr_regions = cpu_to_le64(core->nr_regions);
}

/* Convert an on-disk (little-endian) header to CPU byte order. */
static void header_from_disk(struct log_header *core, struct log_header *disk)
{
	core->magic = le32_to_cpu(disk->magic);
	core->version = le32_to_cpu(disk->version);
	core->nr_regions = le64_to_cpu(disk->nr_regions);
}
/*
 * Synchronously read or write the whole log area (header + bitset)
 * described by lc->header_location.  @rw is READ or WRITE; returns
 * the dm_io() result.
 */
static int rw_header(struct log_c *lc, int rw)
{
	lc->io_req.bi_rw = rw;

	return dm_io(&lc->io_req, 1, &lc->header_location, NULL);
}
/*
 * Read and validate the on-disk header into log->header.
 * Reinitializes the header when a fresh log is wanted (forced
 * [no]sync mode or bad magic).  Returns 0, an I/O error, or
 * -EINVAL for an incompatible version.
 */
static int read_header(struct log_c *log)
{
	int r;

	r = rw_header(log, READ);
	if (r)
		return r;

	header_from_disk(&log->header, log->disk_header);

	/* New log required? */
	if (log->sync != DEFAULTSYNC || log->header.magic != MIRROR_MAGIC) {
		log->header.magic = MIRROR_MAGIC;
		log->header.version = MIRROR_DISK_VERSION;
		log->header.nr_regions = 0;
	}

#ifdef __LITTLE_ENDIAN
	/*
	 * NOTE(review): version 1 logs are adopted in place only on
	 * little-endian hosts — presumably their native-order layout
	 * matches the version 2 little-endian format there; confirm
	 * against the v1 on-disk definition.
	 */
	if (log->header.version == 1)
		log->header.version = 2;
#endif

	if (log->header.version != MIRROR_DISK_VERSION) {
		DMWARN("incompatible disk log version");
		return -EINVAL;
	}

	return 0;
}
  278. static int _check_region_size(struct dm_target *ti, uint32_t region_size)
  279. {
  280. if (region_size < 2 || region_size > ti->len)
  281. return 0;
  282. if (!is_power_of_2(region_size))
  283. return 0;
  284. return 1;
  285. }
  286. /*----------------------------------------------------------------
  287. * core log constructor/destructor
  288. *
  289. * argv contains region_size followed optionally by [no]sync
  290. *--------------------------------------------------------------*/
  291. #define BYTE_SHIFT 3
  292. static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
  293. unsigned int argc, char **argv,
  294. struct dm_dev *dev)
  295. {
  296. enum sync sync = DEFAULTSYNC;
  297. struct log_c *lc;
  298. uint32_t region_size;
  299. unsigned int region_count;
  300. size_t bitset_size, buf_size;
  301. int r;
  302. if (argc < 1 || argc > 2) {
  303. DMWARN("wrong number of arguments to dirty region log");
  304. return -EINVAL;
  305. }
  306. if (argc > 1) {
  307. if (!strcmp(argv[1], "sync"))
  308. sync = FORCESYNC;
  309. else if (!strcmp(argv[1], "nosync"))
  310. sync = NOSYNC;
  311. else {
  312. DMWARN("unrecognised sync argument to "
  313. "dirty region log: %s", argv[1]);
  314. return -EINVAL;
  315. }
  316. }
  317. if (sscanf(argv[0], "%u", &region_size) != 1 ||
  318. !_check_region_size(ti, region_size)) {
  319. DMWARN("invalid region size %s", argv[0]);
  320. return -EINVAL;
  321. }
  322. region_count = dm_sector_div_up(ti->len, region_size);
  323. lc = kmalloc(sizeof(*lc), GFP_KERNEL);
  324. if (!lc) {
  325. DMWARN("couldn't allocate core log");
  326. return -ENOMEM;
  327. }
  328. lc->ti = ti;
  329. lc->touched = 0;
  330. lc->region_size = region_size;
  331. lc->region_count = region_count;
  332. lc->sync = sync;
  333. /*
  334. * Work out how many "unsigned long"s we need to hold the bitset.
  335. */
  336. bitset_size = dm_round_up(region_count,
  337. sizeof(*lc->clean_bits) << BYTE_SHIFT);
  338. bitset_size >>= BYTE_SHIFT;
  339. lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits);
  340. /*
  341. * Disk log?
  342. */
  343. if (!dev) {
  344. lc->clean_bits = vmalloc(bitset_size);
  345. if (!lc->clean_bits) {
  346. DMWARN("couldn't allocate clean bitset");
  347. kfree(lc);
  348. return -ENOMEM;
  349. }
  350. lc->disk_header = NULL;
  351. } else {
  352. lc->log_dev = dev;
  353. lc->log_dev_failed = 0;
  354. lc->header_location.bdev = lc->log_dev->bdev;
  355. lc->header_location.sector = 0;
  356. /*
  357. * Buffer holds both header and bitset.
  358. */
  359. buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) +
  360. bitset_size, ti->limits.hardsect_size);
  361. if (buf_size > dev->bdev->bd_inode->i_size) {
  362. DMWARN("log device %s too small: need %llu bytes",
  363. dev->name, (unsigned long long)buf_size);
  364. kfree(lc);
  365. return -EINVAL;
  366. }
  367. lc->header_location.count = buf_size >> SECTOR_SHIFT;
  368. lc->io_req.mem.type = DM_IO_VMA;
  369. lc->io_req.notify.fn = NULL;
  370. lc->io_req.client = dm_io_client_create(dm_div_up(buf_size,
  371. PAGE_SIZE));
  372. if (IS_ERR(lc->io_req.client)) {
  373. r = PTR_ERR(lc->io_req.client);
  374. DMWARN("couldn't allocate disk io client");
  375. kfree(lc);
  376. return -ENOMEM;
  377. }
  378. lc->disk_header = vmalloc(buf_size);
  379. if (!lc->disk_header) {
  380. DMWARN("couldn't allocate disk log buffer");
  381. dm_io_client_destroy(lc->io_req.client);
  382. kfree(lc);
  383. return -ENOMEM;
  384. }
  385. lc->io_req.mem.ptr.vma = lc->disk_header;
  386. lc->clean_bits = (void *)lc->disk_header +
  387. (LOG_OFFSET << SECTOR_SHIFT);
  388. }
  389. memset(lc->clean_bits, -1, bitset_size);
  390. lc->sync_bits = vmalloc(bitset_size);
  391. if (!lc->sync_bits) {
  392. DMWARN("couldn't allocate sync bitset");
  393. if (!dev)
  394. vfree(lc->clean_bits);
  395. else
  396. dm_io_client_destroy(lc->io_req.client);
  397. vfree(lc->disk_header);
  398. kfree(lc);
  399. return -ENOMEM;
  400. }
  401. memset(lc->sync_bits, (sync == NOSYNC) ? -1 : 0, bitset_size);
  402. lc->sync_count = (sync == NOSYNC) ? region_count : 0;
  403. lc->recovering_bits = vmalloc(bitset_size);
  404. if (!lc->recovering_bits) {
  405. DMWARN("couldn't allocate sync bitset");
  406. vfree(lc->sync_bits);
  407. if (!dev)
  408. vfree(lc->clean_bits);
  409. else
  410. dm_io_client_destroy(lc->io_req.client);
  411. vfree(lc->disk_header);
  412. kfree(lc);
  413. return -ENOMEM;
  414. }
  415. memset(lc->recovering_bits, 0, bitset_size);
  416. lc->sync_search = 0;
  417. log->context = lc;
  418. return 0;
  419. }
/* Constructor for the in-core-only log type: no backing device. */
static int core_ctr(struct dm_dirty_log *log, struct dm_target *ti,
		    unsigned int argc, char **argv)
{
	return create_log_context(log, ti, argc, argv, NULL);
}
/*
 * Free the state common to core and disk logs.  The clean bitset is
 * deliberately NOT freed here: its storage differs per type
 * (vmalloc'd for core, embedded in the disk header buffer for disk).
 */
static void destroy_log_context(struct log_c *lc)
{
	vfree(lc->sync_bits);
	vfree(lc->recovering_bits);
	kfree(lc);
}
/* Destructor for the core log: its clean bitset was vmalloc'd. */
static void core_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	vfree(lc->clean_bits);
	destroy_log_context(lc);
}
  437. /*----------------------------------------------------------------
  438. * disk log constructor/destructor
  439. *
  440. * argv contains log_device region_size followed optionally by [no]sync
  441. *--------------------------------------------------------------*/
  442. static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
  443. unsigned int argc, char **argv)
  444. {
  445. int r;
  446. struct dm_dev *dev;
  447. if (argc < 2 || argc > 3) {
  448. DMWARN("wrong number of arguments to disk dirty region log");
  449. return -EINVAL;
  450. }
  451. r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
  452. FMODE_READ | FMODE_WRITE, &dev);
  453. if (r)
  454. return r;
  455. r = create_log_context(log, ti, argc - 1, argv + 1, dev);
  456. if (r) {
  457. dm_put_device(ti, dev);
  458. return r;
  459. }
  460. return 0;
  461. }
/*
 * Destructor for the disk log: release the device, the header buffer
 * (which also contains the clean bitset), the io client, and the
 * shared context.
 */
static void disk_dtr(struct dm_dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	dm_put_device(lc->ti, lc->log_dev);
	vfree(lc->disk_header);
	dm_io_client_destroy(lc->io_req.client);
	destroy_log_context(lc);
}
  470. static int count_bits32(uint32_t *addr, unsigned size)
  471. {
  472. int count = 0, i;
  473. for (i = 0; i < size; i++) {
  474. count += hweight32(*(addr+i));
  475. }
  476. return count;
  477. }
/*
 * Mark the log device as failed (idempotent) and raise a table event
 * so userspace can notice the degraded log.
 */
static void fail_log_device(struct log_c *lc)
{
	if (lc->log_dev_failed)
		return;

	lc->log_dev_failed = 1;
	dm_table_event(lc->ti->table);
}
/*
 * Resume a disk log: re-read the header, reconcile the bitsets with
 * the current region count (the device may have grown or shrunk),
 * then write the refreshed header + bitset back out.
 */
static int disk_resume(struct dm_dirty_log *log)
{
	int r;
	unsigned i;
	struct log_c *lc = (struct log_c *) log->context;
	size_t size = lc->bitset_uint32_count * sizeof(uint32_t);

	/* read the disk header */
	r = read_header(lc);
	if (r) {
		DMWARN("%s: Failed to read header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
		/*
		 * If the log device cannot be read, we must assume
		 * all regions are out-of-sync. If we simply return
		 * here, the state will be uninitialized and could
		 * lead us to return 'in-sync' status for regions
		 * that are actually 'out-of-sync'.
		 */
		lc->header.nr_regions = 0;
	}

	/* set or clear any new bits -- device has grown */
	if (lc->sync == NOSYNC)
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_set_bit(lc, lc->clean_bits, i);
	else
		for (i = lc->header.nr_regions; i < lc->region_count; i++)
			/* FIXME: amazingly inefficient */
			log_clear_bit(lc, lc->clean_bits, i);

	/* clear any old bits -- device has shrunk */
	/* (only the tail of the final 32-bit word needs clearing) */
	for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++)
		log_clear_bit(lc, lc->clean_bits, i);

	/* copy clean across to sync */
	memcpy(lc->sync_bits, lc->clean_bits, size);
	lc->sync_count = count_bits32(lc->clean_bits, lc->bitset_uint32_count);
	lc->sync_search = 0;

	/* set the correct number of regions in the header */
	lc->header.nr_regions = lc->region_count;

	header_to_disk(&lc->header, lc->disk_header);

	/* write the new header */
	r = rw_header(lc, WRITE);
	if (r) {
		DMWARN("%s: Failed to write header on dirty region log device",
		       lc->log_dev->name);
		fail_log_device(lc);
	}

	return r;
}
/* Report the region size (in sectors) chosen at construction. */
static uint32_t core_get_region_size(struct dm_dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	return lc->region_size;
}
/* Restart the resync scan from region 0 on resume. */
static int core_resume(struct dm_dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	lc->sync_search = 0;
	return 0;
}
/* Non-zero if @region has its clean bit set (no writes logged). */
static int core_is_clean(struct dm_dirty_log *log, region_t region)
{
	struct log_c *lc = (struct log_c *) log->context;

	return log_test_bit(lc->clean_bits, region);
}
/* Non-zero if @region is in sync; the @block argument is unused here. */
static int core_in_sync(struct dm_dirty_log *log, region_t region, int block)
{
	struct log_c *lc = (struct log_c *) log->context;

	return log_test_bit(lc->sync_bits, region);
}
/* An in-core log has no backing store, so flushing is a no-op. */
static int core_flush(struct dm_dirty_log *log)
{
	/* no op */
	return 0;
}
  560. static int disk_flush(struct dm_dirty_log *log)
  561. {
  562. int r;
  563. struct log_c *lc = (struct log_c *) log->context;
  564. /* only write if the log has changed */
  565. if (!lc->touched)
  566. return 0;
  567. r = rw_header(lc, WRITE);
  568. if (r)
  569. fail_log_device(lc);
  570. else
  571. lc->touched = 0;
  572. return r;
  573. }
/* Mark @region dirty: clear its clean bit. */
static void core_mark_region(struct dm_dirty_log *log, region_t region)
{
	struct log_c *lc = (struct log_c *) log->context;

	log_clear_bit(lc, lc->clean_bits, region);
}
/* Mark @region clean again: set its clean bit. */
static void core_clear_region(struct dm_dirty_log *log, region_t region)
{
	struct log_c *lc = (struct log_c *) log->context;

	log_set_bit(lc, lc->clean_bits, region);
}
/*
 * Pick the next region needing resync: scan sync_bits for a zero bit
 * starting at sync_search, skipping regions already being recovered.
 * The chosen region is flagged in recovering_bits.
 * Returns 1 with *region set, or 0 when nothing is left to do.
 */
static int core_get_resync_work(struct dm_dirty_log *log, region_t *region)
{
	struct log_c *lc = (struct log_c *) log->context;

	if (lc->sync_search >= lc->region_count)
		return 0;

	do {
		*region = ext2_find_next_zero_bit(
					(unsigned long *) lc->sync_bits,
					lc->region_count,
					lc->sync_search);
		/* continue the next scan after this candidate */
		lc->sync_search = *region + 1;

		if (*region >= lc->region_count)
			return 0;

	} while (log_test_bit(lc->recovering_bits, *region));

	log_set_bit(lc, lc->recovering_bits, *region);
	return 1;
}
/*
 * Record the outcome of a recovery attempt on @region: drop its
 * recovering bit, then update the sync bit and sync_count.
 */
static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
				 int in_sync)
{
	struct log_c *lc = (struct log_c *) log->context;

	log_clear_bit(lc, lc->recovering_bits, region);
	if (in_sync) {
		log_set_bit(lc, lc->sync_bits, region);
		lc->sync_count++;
	} else if (log_test_bit(lc->sync_bits, region)) {
		/* only decrement if the region was actually counted */
		lc->sync_count--;
		log_clear_bit(lc, lc->sync_bits, region);
	}
}
/* Number of regions currently counted as in sync. */
static region_t core_get_sync_count(struct dm_dirty_log *log)
{
	struct log_c *lc = (struct log_c *) log->context;

	return lc->sync_count;
}
/*
 * Append "sync " or "nosync " when the user explicitly requested a
 * mode.  NOTE: relies on 'lc' and the DMEMIT locals (result, maxlen,
 * sz) being in scope at the expansion site.
 */
#define DMEMIT_SYNC \
	if (lc->sync != DEFAULTSYNC) \
		DMEMIT("%ssync ", lc->sync == NOSYNC ? "no" : "")

/*
 * Status for the core log: INFO is a fixed one-field line; TABLE
 * reproduces the constructor arguments (count adjusted when a sync
 * argument was given).
 */
static int core_status(struct dm_dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
{
	int sz = 0;
	struct log_c *lc = log->context;

	switch(status) {
	case STATUSTYPE_INFO:
		DMEMIT("1 %s", log->type->name);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %u %u ", log->type->name,
		       lc->sync == DEFAULTSYNC ? 1 : 2, lc->region_size);
		DMEMIT_SYNC;
	}

	return sz;
}
/*
 * Status for the disk log: INFO also reports the log device and
 * whether it has failed ('D') or is alive ('A'); TABLE reproduces
 * the constructor arguments.
 */
static int disk_status(struct dm_dirty_log *log, status_type_t status,
		       char *result, unsigned int maxlen)
{
	int sz = 0;
	struct log_c *lc = log->context;

	switch(status) {
	case STATUSTYPE_INFO:
		DMEMIT("3 %s %s %c", log->type->name, lc->log_dev->name,
		       lc->log_dev_failed ? 'D' : 'A');
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%s %u %s %u ", log->type->name,
		       lc->sync == DEFAULTSYNC ? 2 : 3, lc->log_dev->name,
		       lc->region_size);
		DMEMIT_SYNC;
	}

	return sz;
}
/* Ops table for the in-core-only dirty log. */
static struct dm_dirty_log_type _core_type = {
	.name = "core",
	.module = THIS_MODULE,
	.ctr = core_ctr,
	.dtr = core_dtr,
	.resume = core_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = core_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.set_region_sync = core_set_region_sync,
	.get_sync_count = core_get_sync_count,
	.status = core_status,
};
/*
 * Ops table for the persistent (disk-backed) dirty log.  Most ops
 * are shared with the core log; the bitset is written out both on
 * explicit flush and at post-suspend (same disk_flush helper).
 */
static struct dm_dirty_log_type _disk_type = {
	.name = "disk",
	.module = THIS_MODULE,
	.ctr = disk_ctr,
	.dtr = disk_dtr,
	.postsuspend = disk_flush,
	.resume = disk_resume,
	.get_region_size = core_get_region_size,
	.is_clean = core_is_clean,
	.in_sync = core_in_sync,
	.flush = disk_flush,
	.mark_region = core_mark_region,
	.clear_region = core_clear_region,
	.get_resync_work = core_get_resync_work,
	.set_region_sync = core_set_region_sync,
	.get_sync_count = core_get_sync_count,
	.status = disk_status,
};
  691. static int __init dm_dirty_log_init(void)
  692. {
  693. int r;
  694. r = dm_dirty_log_type_register(&_core_type);
  695. if (r)
  696. DMWARN("couldn't register core log");
  697. r = dm_dirty_log_type_register(&_disk_type);
  698. if (r) {
  699. DMWARN("couldn't register disk type");
  700. dm_dirty_log_type_unregister(&_core_type);
  701. }
  702. return r;
  703. }
/* Unregister the built-in types in reverse order of registration. */
static void __exit dm_dirty_log_exit(void)
{
	dm_dirty_log_type_unregister(&_disk_type);
	dm_dirty_log_type_unregister(&_core_type);
}

module_init(dm_dirty_log_init);
module_exit(dm_dirty_log_exit);
  711. MODULE_DESCRIPTION(DM_NAME " dirty region log");
  712. MODULE_AUTHOR("Joe Thornber, Heinz Mauelshagen <dm-devel@redhat.com>");
  713. MODULE_LICENSE("GPL");