raid5-cache.c
/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"
/*
 * metadata/data is stored on disk in 4k-size units (blocks) regardless of the
 * underlying hardware sector size. Only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * reclaim runs once reclaimable space reaches 1/4 of the disk size or 10G,
 * whichever is smaller. This prevents recovery from having to scan a very
 * long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE 4
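
/*
 * Editor's note: in-memory state of one log (journal) device. As the field
 * comments below spell out, the device is used as a circular buffer:
 * log_start is the head where new io_units are appended, last_checkpoint is
 * the tail where recovery starts scanning, and reclaim moves the tail
 * forward once stripes have safely reached the raid disks.
 */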
struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, round to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;
	u64 next_cp_seq;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t *io_pool;
	struct bio_set *bs;
	mempool_t *meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (eg, reclaim
					 * doesn't wait for a specific io_unit
					 * switching to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;
	bool in_teardown;
};
/*
 * an IO range starts from a meta data block and ends at the next meta data
 * block. The io unit's meta data block tracks the data/parity that follows
 * it. An io unit is written to the log disk with normal writes; as we always
 * flush the log disk first and only then start moving data to the raid
 * disks, there is no requirement to write the io unit with FLUSH/FUA.
 */
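/*
 * Editor's note: one possible on-disk layout of a single io_unit, sketched
 * from the append logic below (data payloads first, parity last):
 *
 *   | meta block | data page | ... | data page | P page | (Q page) |
 */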
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to log,
				 * no longer accepting new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripe data finished writing to raid */
};
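
/*
 * Editor's note: the log device is treated as a ring; the helpers below
 * wrap sector arithmetic around log->device_size.
 */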
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}
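
/*
 * Editor's note: hand every stripe attached to this io_unit back to the
 * raid5 state machine so its data/parity can be written to the member disks.
 */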
static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}
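
/*
 * Editor's note: completion handler for an io_unit's log write. If the log
 * device has a volatile cache (need_cache_flush), finished io_units are
 * parked on io_end_ios to wait for an explicit cache flush; otherwise their
 * stripes are handed to the raid disks right away.
 */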
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_log *log = io->log;
	unsigned long flags;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	if (log->need_cache_flush)
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);
}
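
/*
 * Editor's note: seal the current io_unit — record the final meta size,
 * checksum the meta block, and submit the bio carrying it to the log device.
 */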
static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	submit_bio(WRITE, io->current_bio);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);

	bio->bi_rw = WRITE;
	bio->bi_bdev = log->rdev->bdev;
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	/*
	 * If we filled up the log device start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}
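
/*
 * Editor's note: start a fresh io_unit — allocate it from the mempool,
 * initialize its meta block (magic, version, sequence number, log position)
 * and queue it on log->running_ios.
 */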
static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}
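
/*
 * Editor's note: append one payload descriptor (data or parity) to the
 * current meta block. Parity entries may carry two checksums: one for P
 * and, on raid6, one for Q.
 */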
static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		struct bio *prev = io->current_bio;

		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, prev);

		submit_bio(WRITE, prev);
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}
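
/*
 * Editor's note: append a stripe's dirty data pages and its parity page(s)
 * to the log — one payload descriptor plus one page per entry, data first,
 * parity last.
 */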
static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			  int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	int ret;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	ret = r5l_get_meta(log, meta_size);
	if (ret)
		return ret;

	io = log->current_io;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (sh->qd_idx >= 0) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;

	return 0;
}
static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	int write_disks = 0;
	int data_pages, parity_pages;
	int meta_size;
	int reserve;
	int i;
	int ret = 0;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		write_disks++;
		/* checksum is already calculated in last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;
	/* Doesn't work with very big raid array */
	if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
		return -EINVAL;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
	if (!r5l_has_free_space(log, reserve)) {
		spin_lock(&log->no_space_stripes_lock);
		list_add_tail(&sh->log_list, &log->no_space_stripes);
		spin_unlock(&log->no_space_stripes_lock);

		r5l_wake_reclaim(log, reserve);
	} else {
		ret = r5l_log_stripe(log, sh, data_pages, parity_pages);
		if (ret) {
			spin_lock_irq(&log->io_list_lock);
			list_add_tail(&sh->log_list, &log->no_mem_stripes);
			spin_unlock_irq(&log->io_list_lock);
		}
	}

	mutex_unlock(&log->io_mutex);
	return 0;
}
void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (!log)
		return -ENODEV;
	/*
	 * we flush log disk cache first, then write stripe data to raid disks.
	 * So if bio is finished, the log disk cache is flushed already. The
	 * recovery guarantees we can recover the bio from the log disk, so we
	 * don't need to flush again
	 */
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);
		return 0;
	}
	bio->bi_rw &= ~REQ_FLUSH;
	return -EAGAIN;
}
/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

static sector_t r5l_reclaimable_space(struct r5l_log *log)
{
	return r5l_ring_distance(log, log->last_checkpoint,
				 log->next_checkpoint);
}

static void r5l_run_no_mem_stripe(struct r5l_log *log)
{
	struct stripe_head *sh;

	assert_spin_locked(&log->io_list_lock);

	if (!list_empty(&log->no_mem_stripes)) {
		sh = list_first_entry(&log->no_mem_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}
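
/*
 * Editor's note: retire io_units whose stripes have fully reached the raid
 * disks, advancing next_checkpoint past them and freeing their io_unit
 * structures.
 */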
static bool r5l_complete_finished_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;
	bool found = false;

	assert_spin_locked(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_STRIPE_END)
			break;

		log->next_checkpoint = io->log_start;
		log->next_cp_seq = io->seq;

		list_del(&io->log_sibling);
		mempool_free(io, log->io_pool);
		r5l_run_no_mem_stripe(log);

		found = true;
	}

	return found;
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);

	if (!r5l_complete_finished_ios(log)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	if (r5l_reclaimable_space(log) > log->max_free_space)
		r5l_wake_reclaim(log, 0);

	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}
static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
		flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;

	if (bio->bi_error)
		md_error(log->rdev->mddev, log->rdev);

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling)
		r5l_io_run_stripes(io);
	list_splice_tail_init(&log->flushing_ios, &log->finished_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting dispatch IO to raid.
 * io_unit(meta) consists of a log. There is one situation we want to avoid: a
 * broken meta in the middle of a log makes recovery unable to find meta at
 * the head of the log. If an operation requires meta at the head to be
 * persistent in the log, we must make sure the meta before it is persistent
 * in the log too. A case is:
 *
 * stripe data/parity is in log, we start write stripe to raid disks. stripe
 * data/parity must be persistent in log before we do the write to raid disks.
 *
 * The solution is we strictly maintain io_unit list order. In this case, we
 * only write stripes of an io_unit to raid disks till the io_unit is the first
 * one whose data/parity is in log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log || !log->need_cache_flush)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	log->flush_bio.bi_bdev = log->rdev->bdev;
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	submit_bio(WRITE_FLUSH, &log->flush_bio);
}
static void r5l_write_super(struct r5l_log *log, sector_t cp);

static void r5l_write_super_and_discard_space(struct r5l_log *log,
	sector_t end)
{
	struct block_device *bdev = log->rdev->bdev;
	struct mddev *mddev;

	r5l_write_super(log, end);

	if (!blk_queue_discard(bdev_get_queue(bdev)))
		return;

	mddev = log->rdev->mddev;
	/*
	 * This is to avoid a deadlock. r5l_quiesce holds reconfig_mutex and
	 * waits for this thread to finish. This thread waits for
	 * MD_CHANGE_PENDING to clear, which is supposed to be done in
	 * md_check_recovery(). md_check_recovery() tries to get
	 * reconfig_mutex. Since r5l_quiesce already holds the mutex,
	 * md_check_recovery() fails, so MD_CHANGE_PENDING never gets cleared.
	 * The in_teardown check works around this issue.
	 */
	if (!log->in_teardown) {
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		set_bit(MD_CHANGE_PENDING, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait,
			!test_bit(MD_CHANGE_PENDING, &mddev->flags) ||
			log->in_teardown);
		/*
		 * r5l_quiesce could run after the in_teardown check and take
		 * the mutex first. The superblock might get updated twice.
		 */
		if (log->in_teardown)
			md_update_sb(mddev, 1);
	} else {
		WARN_ON(!mddev_is_locked(mddev));
		md_update_sb(mddev, 1);
	}

	/* discard IO error really doesn't matter, ignore it */
	if (log->last_checkpoint < end) {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				end - log->last_checkpoint, GFP_NOIO, 0);
	} else {
		blkdev_issue_discard(bdev,
				log->last_checkpoint + log->rdev->data_offset,
				log->device_size - log->last_checkpoint,
				GFP_NOIO, 0);
		blkdev_issue_discard(bdev, log->rdev->data_offset, end,
				GFP_NOIO, 0);
	}
}
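
/*
 * Editor's note: reclaim log space — wait until enough io_units have fully
 * reached the raid disks, then move last_checkpoint forward and discard the
 * freed range on the log device.
 */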
static void r5l_do_reclaim(struct r5l_log *log)
{
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);
	sector_t reclaimable;
	sector_t next_checkpoint;
	u64 next_cp_seq;

	spin_lock_irq(&log->io_list_lock);
	/*
	 * move proper io_unit to reclaim list. We should not change the order.
	 * reclaimable/unreclaimable io_units can be mixed in the list, we
	 * shouldn't reuse space of an unreclaimable io_unit
	 */
	while (1) {
		reclaimable = r5l_reclaimable_space(log);
		if (reclaimable >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->finished_ios)))
			break;

		md_wakeup_thread(log->rdev->mddev->thread);
		wait_event_lock_irq(log->iounit_wait,
				    r5l_reclaimable_space(log) > reclaimable,
				    log->io_list_lock);
	}

	next_checkpoint = log->next_checkpoint;
	next_cp_seq = log->next_cp_seq;
	spin_unlock_irq(&log->io_list_lock);

	BUG_ON(reclaimable < 0);
	if (reclaimable == 0)
		return;

	/*
	 * write_super will flush cache of each raid disk. We must write super
	 * here, because the log area might be reused soon and we don't want to
	 * confuse recovery
	 */
	r5l_write_super_and_discard_space(log, next_checkpoint);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = next_checkpoint;
	log->last_cp_seq = next_cp_seq;
	mutex_unlock(&log->io_mutex);

	r5l_run_no_space_stripes(log);
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;
	r5l_do_reclaim(log);
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}
void r5l_quiesce(struct r5l_log *log, int state)
{
	struct mddev *mddev;

	if (!log || state == 2)
		return;
	if (state == 0) {
		log->in_teardown = 0;
		/*
		 * This is a special case for hotadd. In suspend, the array has
		 * no journal. In resume, journal is initialized as well as the
		 * reclaim thread.
		 */
		if (log->reclaim_thread)
			return;
		log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
					log->rdev->mddev, "reclaim");
	} else if (state == 1) {
		/*
		 * at this point all stripes are finished, so io_unit is at
		 * least in STRIPE_END state
		 */
		log->in_teardown = 1;
		/* make sure r5l_write_super_and_discard_space exits */
		mddev = log->rdev->mddev;
		wake_up(&mddev->sb_wait);
		r5l_wake_reclaim(log, -1L);
		md_unregister_thread(&log->reclaim_thread);
		r5l_do_reclaim(log);
	}
}
bool r5l_log_disk_error(struct r5conf *conf)
{
	struct r5l_log *log;
	bool ret;

	/* don't allow write if journal disk is missing */
	rcu_read_lock();
	log = rcu_dereference(conf->log);

	if (!log)
		ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	else
		ret = test_bit(Faulty, &log->rdev->flags);
	rcu_read_unlock();
	return ret;
}

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
};
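
/*
 * Editor's note: read the meta block at ctx->pos and validate its magic,
 * version, sequence number, position and checksum before recovery trusts it.
 */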
static int r5l_read_meta_block(struct r5l_log *log,
			       struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;

	if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
		return -EIO;

	mb = page_address(page);
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}
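
/*
 * Editor's note: replay one stripe from the log — read its data/parity
 * pages back from the log device, verify the per-page checksums, and write
 * the pages to the raid member disks (and any replacements).
 */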
static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx,
					 sector_t stripe_sect,
					 int *offset, sector_t *log_offset)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct stripe_head *sh;
	struct r5l_payload_data_parity *payload;
	int disk_index;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
	while (1) {
		payload = page_address(ctx->meta_page) + *offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			raid5_compute_sector(conf,
					     le64_to_cpu(payload->location), 0,
					     &disk_index, sh);

			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
			ctx->meta_total_blocks += BLOCK_SECTORS;
		} else {
			disk_index = sh->pd_idx;
			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

			if (sh->qd_idx >= 0) {
				disk_index = sh->qd_idx;
				sync_page_io(log->rdev,
					     r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
					     PAGE_SIZE, sh->dev[disk_index].page,
					     READ, false);
				sh->dev[disk_index].log_checksum =
					le32_to_cpu(payload->checksum[1]);
				set_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags);
			}
			ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
		}

		*log_offset = r5l_ring_add(log, *log_offset,
					   le32_to_cpu(payload->size));
		*offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			break;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		void *addr;
		u32 checksum;

		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		addr = kmap_atomic(sh->dev[disk_index].page);
		checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
		kunmap_atomic(addr);
		if (checksum != sh->dev[disk_index].log_checksum)
			goto error;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		struct md_rdev *rdev, *rrdev;

		if (!test_and_clear_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev)
			sync_page_io(rdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev)
			sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
	}
	raid5_release_stripe(sh);
	return 0;

error:
	for (disk_index = 0; disk_index < sh->disks; disk_index++)
		sh->dev[disk_index].flags = 0;
	raid5_release_stripe(sh);
	return -EINVAL;
}
static int r5l_recovery_flush_one_meta(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct r5l_payload_data_parity *payload;
	struct r5l_meta_block *mb;
	int offset;
	sector_t log_offset;
	sector_t stripe_sector;

	mb = page_address(ctx->meta_page);
	offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + offset;
		stripe_sector = raid5_compute_sector(conf,
						     le64_to_cpu(payload->location), 0, &dd, NULL);
		if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
						  &offset, &log_offset))
			return -EINVAL;
	}
	return 0;
}

/* copy data/parity from log to raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
				   struct r5l_recovery_ctx *ctx)
{
	while (1) {
		if (r5l_read_meta_block(log, ctx))
			return;
		if (r5l_recovery_flush_one_meta(log, ctx))
			return;
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}
}
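
/*
 * Editor's note: write a valid but empty meta block at @pos with sequence
 * number @seq, using FUA so it is durable on the log device before the
 * caller points the superblock at it.
 */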
static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;
	u32 crc;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	mb = page_address(page);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	mb->checksum = cpu_to_le32(crc);

	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}
static int r5l_recovery_log(struct r5l_log *log)
{
	struct r5l_recovery_ctx ctx;

	ctx.pos = log->last_checkpoint;
	ctx.seq = log->last_cp_seq;
	ctx.meta_page = alloc_page(GFP_KERNEL);
	if (!ctx.meta_page)
		return -ENOMEM;

	r5l_recovery_flush_log(log, &ctx);
	__free_page(ctx.meta_page);

	/*
	 * we did a recovery. Now ctx.pos points to an invalid meta block. New
	 * log will start here. But we can't let the superblock point to the
	 * last valid meta block. The log might look like:
	 * | meta 1| meta 2| meta 3|
	 * meta 1 is valid, meta 2 is invalid. meta 3 could be valid. If the
	 * superblock points to meta 1, we write a new valid meta 2n. If a
	 * crash happens again, new recovery will start from meta 1. Since
	 * meta 2n is valid now, recovery will think meta 3 is valid, which
	 * is wrong.
	 * The solution is we create a new meta in meta2 with its seq == meta
	 * 1's seq + 10 and let the superblock point to meta2. The same
	 * recovery will not think meta 3 is a valid meta, because its seq
	 * doesn't match
	 */
	if (ctx.seq > log->last_cp_seq + 1) {
		int ret;

		ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
		if (ret)
			return ret;
		log->seq = ctx.seq + 11;
		log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
		r5l_write_super(log, ctx.pos);
	} else {
		log->log_start = ctx.pos;
		log->seq = ctx.seq;
	}
	return 0;
}
static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
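
/*
 * Editor's note: read the meta block that journal_tail points to. If it is
 * missing or invalid, (re)initialize an empty log; otherwise pick up its
 * sequence number and replay the log from there.
 */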
static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		/*
		 * Make sure super points to correct address. Log might have
		 * data very soon. If super doesn't have the correct log tail
		 * address, recovery can't find the log
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	return r5l_recovery_log(log);
ioerr:
	__free_page(page);
	return ret;
}
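
/*
 * Editor's note: attach a journal device to the array — allocate the
 * r5l_log, its mempools and bioset, start the reclaim thread, then
 * load/replay the existing log.
 */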
int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct r5l_log *log;

	if (PAGE_SIZE != 4096)
		return -EINVAL;
	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);

	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->finished_ios);
	bio_init(&log->flush_bio);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc);
	if (!log->io_pool)
		goto io_pool;

	log->bs = bioset_create(R5L_POOL_SIZE, 0);
	if (!log->bs)
		goto io_bs;

	log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0);
	if (!log->meta_pool)
		goto out_mempool;

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_mem_stripes);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	if (r5l_load_log(log))
		goto error;

	rcu_assign_pointer(conf->log, log);
	set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
	return 0;

error:
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	mempool_destroy(log->meta_pool);
out_mempool:
	bioset_free(log->bs);
io_bs:
	mempool_destroy(log->io_pool);
io_pool:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}
void r5l_exit_log(struct r5l_log *log)
{
	md_unregister_thread(&log->reclaim_thread);
	mempool_destroy(log->meta_pool);
	bioset_free(log->bs);
	mempool_destroy(log->io_pool);
	kmem_cache_destroy(log->io_kc);
	kfree(log);
}