/* raid5-cache.c */
/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"
/*
 * metadata/data are stored on disk in 4k units (blocks) regardless of the
 * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * reclaim runs once reclaimable space reaches 1/4 of the disk size or 10G,
 * whichever is smaller. This prevents recovery from having to scan a very
 * long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
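
/*
 * Taken together with r5l_load_log(), the effective reclaim threshold is
 * min(device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT, RECLAIM_MAX_FREE_SPACE),
 * i.e. a quarter of the log device or 10GiB (10 * 1024 * 1024 * 2 sectors of
 * 512 bytes), whichever is smaller.
 */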

struct r5l_log {
        struct md_rdev *rdev;

        u32 uuid_checksum;

        sector_t device_size;           /* log device size, rounded to
                                         * BLOCK_SECTORS */
        sector_t max_free_space;        /* reclaim runs if free space is at
                                         * this size */
        sector_t last_checkpoint;       /* log tail. where recovery scan
                                         * starts from */
        u64 last_cp_seq;                /* log tail sequence */

        sector_t log_start;             /* log head. where new data appends */
        u64 seq;                        /* log head sequence */

        struct mutex io_mutex;
        struct r5l_io_unit *current_io; /* current io_unit accepting new data */

        spinlock_t io_list_lock;
        struct list_head running_ios;   /* io_units which are still running,
                                         * and have not yet been completely
                                         * written to the log */
        struct list_head io_end_ios;    /* io_units which have been completely
                                         * written to the log but not yet
                                         * written to the RAID */
        struct list_head flushing_ios;  /* io_units which are waiting for log
                                         * cache flush */
        struct list_head flushed_ios;   /* io_units which settle down in log disk */
        struct bio flush_bio;
        struct list_head stripe_end_ios;/* io_units which have been completely
                                         * written to the RAID but have not yet
                                         * been considered for updating super */

        struct kmem_cache *io_kc;

        struct md_thread *reclaim_thread;
        unsigned long reclaim_target;   /* amount of space that needs to be
                                         * reclaimed. if it's 0, reclaim spaces
                                         * used by io_units which are in
                                         * IO_UNIT_STRIPE_END state (i.e. reclaim
                                         * doesn't wait for a specific io_unit
                                         * switching to IO_UNIT_STRIPE_END
                                         * state) */
        wait_queue_head_t iounit_wait;

        struct list_head no_space_stripes; /* pending stripes, log has no space */
        spinlock_t no_space_stripes_lock;
};

/*
 * An IO range starts at a meta data block and ends at the next meta data
 * block. The io_unit's meta data block tracks the data/parity that follows
 * it. An io_unit is written to the log disk with a normal write; since we
 * always flush the log disk before we start moving data to the raid disks,
 * there is no need to write the io_unit with FLUSH/FUA.
 */
struct r5l_io_unit {
        struct r5l_log *log;

        struct page *meta_page; /* store meta block */
        int meta_offset;        /* current offset in meta_page */

        struct bio_list bios;
        atomic_t pending_io;    /* pending bios not written to log yet */
        struct bio *current_bio;/* current_bio accepting new data */

        atomic_t pending_stripe;/* how many stripes not flushed to raid */
        u64 seq;                /* seq number of the metablock */
        sector_t log_start;     /* where the io_unit starts */
        sector_t log_end;       /* where the io_unit ends */
        struct list_head log_sibling; /* log->running_ios */
        struct list_head stripe_list; /* stripes added to the io_unit */

        int state;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
        IO_UNIT_RUNNING = 0,    /* accepting new IO */
        IO_UNIT_IO_START = 1,   /* io_unit bio started writing to the log,
                                 * no longer accepting new bios */
        IO_UNIT_IO_END = 2,     /* io_unit bio finished writing to the log */
        IO_UNIT_STRIPE_END = 3, /* stripe data finished writing to raid */
};
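
/*
 * On-disk layout of one io_unit: a 4k meta block (struct r5l_meta_block plus
 * one struct r5l_payload_data_parity entry per page) followed by the data and
 * parity pages it describes. In memory, an io_unit only ever moves forward
 * through the states above, and through the log's lists in the same order:
 * running_ios -> io_end_ios -> flushing_ios -> flushed_ios -> stripe_end_ios.
 */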

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
        start += inc;
        if (start >= log->device_size)
                start = start - log->device_size;
        return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
                                  sector_t end)
{
        if (end >= start)
                return end - start;
        else
                return end + log->device_size - start;
}
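
/*
 * Example of the ring arithmetic: with device_size == 1024 sectors,
 * r5l_ring_add(log, 1016, 16) wraps around to 8, and
 * r5l_ring_distance(log, 1016, 8) is 16.
 */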

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
        sector_t used_size;

        used_size = r5l_ring_distance(log, log->last_checkpoint,
                                      log->log_start);

        return log->device_size > used_size + size;
}

static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log)
{
        struct r5l_io_unit *io;
        /* We can't handle memory allocation failure so far */
        gfp_t gfp = GFP_NOIO | __GFP_NOFAIL;

        io = kmem_cache_zalloc(log->io_kc, gfp);
        io->log = log;
        io->meta_page = alloc_page(gfp | __GFP_ZERO);

        bio_list_init(&io->bios);
        INIT_LIST_HEAD(&io->log_sibling);
        INIT_LIST_HEAD(&io->stripe_list);
        io->state = IO_UNIT_RUNNING;
        return io;
}

static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
{
        __free_page(io->meta_page);
        kmem_cache_free(log->io_kc, io);
}

static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
                                  enum r5l_io_unit_state state)
{
        struct r5l_io_unit *io;

        while (!list_empty(from)) {
                io = list_first_entry(from, struct r5l_io_unit, log_sibling);
                /* don't change list order */
                if (io->state >= state)
                        list_move_tail(&io->log_sibling, to);
                else
                        break;
        }
}
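
/*
 * The io_unit lists are kept in log order. io_units may reach 'state' out of
 * order (log writes can complete out of order), so r5l_move_io_unit_list()
 * only moves the leading run of entries that have reached 'state'; an entry
 * that is still behind keeps everything after it in place, preserving order.
 */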

/*
 * We don't want too many io_units to reside in the stripe_end_ios list, which
 * would waste a lot of memory. So we try to remove some, but we must keep at
 * least 2 io_units: the superblock must point to a valid meta, and if it
 * points to the last meta, recovery can scan less.
 */
static void r5l_compress_stripe_end_list(struct r5l_log *log)
{
        struct r5l_io_unit *first, *last, *io;

        first = list_first_entry(&log->stripe_end_ios,
                                 struct r5l_io_unit, log_sibling);
        last = list_last_entry(&log->stripe_end_ios,
                               struct r5l_io_unit, log_sibling);
        if (first == last)
                return;
        list_del(&first->log_sibling);
        list_del(&last->log_sibling);
        while (!list_empty(&log->stripe_end_ios)) {
                io = list_first_entry(&log->stripe_end_ios,
                                      struct r5l_io_unit, log_sibling);
                list_del(&io->log_sibling);
                first->log_end = io->log_end;
                r5l_free_io_unit(log, io);
        }
        list_add_tail(&first->log_sibling, &log->stripe_end_ios);
        list_add_tail(&last->log_sibling, &log->stripe_end_ios);
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
                                    enum r5l_io_unit_state state)
{
        struct r5l_log *log = io->log;

        if (WARN_ON(io->state >= state))
                return;
        io->state = state;
        if (state == IO_UNIT_IO_END)
                r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
                                      IO_UNIT_IO_END);
        if (state == IO_UNIT_STRIPE_END) {
                struct r5l_io_unit *last;
                sector_t reclaimable_space;

                r5l_move_io_unit_list(&log->flushed_ios, &log->stripe_end_ios,
                                      IO_UNIT_STRIPE_END);
                last = list_last_entry(&log->stripe_end_ios,
                                       struct r5l_io_unit, log_sibling);
                reclaimable_space = r5l_ring_distance(log, log->last_checkpoint,
                                                      last->log_end);
                if (reclaimable_space >= log->max_free_space)
                        r5l_wake_reclaim(log, 0);

                r5l_compress_stripe_end_list(log);
                wake_up(&log->iounit_wait);
        }
}

static void r5l_set_io_unit_state(struct r5l_io_unit *io,
                                  enum r5l_io_unit_state state)
{
        struct r5l_log *log = io->log;
        unsigned long flags;

        spin_lock_irqsave(&log->io_list_lock, flags);
        __r5l_set_io_unit_state(io, state);
        spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/* XXX: totally ignores I/O errors */
static void r5l_log_endio(struct bio *bio)
{
        struct r5l_io_unit *io = bio->bi_private;
        struct r5l_log *log = io->log;

        bio_put(bio);

        if (!atomic_dec_and_test(&io->pending_io))
                return;

        r5l_set_io_unit_state(io, IO_UNIT_IO_END);
        md_wakeup_thread(log->rdev->mddev->thread);
}

static void r5l_submit_current_io(struct r5l_log *log)
{
        struct r5l_io_unit *io = log->current_io;
        struct r5l_meta_block *block;
        struct bio *bio;
        u32 crc;

        if (!io)
                return;

        block = page_address(io->meta_page);
        block->meta_size = cpu_to_le32(io->meta_offset);
        crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
        block->checksum = cpu_to_le32(crc);

        log->current_io = NULL;
        r5l_set_io_unit_state(io, IO_UNIT_IO_START);

        while ((bio = bio_list_pop(&io->bios))) {
                /* all IO must start from rdev->data_offset */
                bio->bi_iter.bi_sector += log->rdev->data_offset;
                submit_bio(WRITE, bio);
        }
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
        struct r5l_io_unit *io;
        struct r5l_meta_block *block;
        struct bio *bio;

        io = r5l_alloc_io_unit(log);

        block = page_address(io->meta_page);
        block->magic = cpu_to_le32(R5LOG_MAGIC);
        block->version = R5LOG_VERSION;
        block->seq = cpu_to_le64(log->seq);
        block->position = cpu_to_le64(log->log_start);

        io->log_start = log->log_start;
        io->meta_offset = sizeof(struct r5l_meta_block);
        io->seq = log->seq;

        bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
        io->current_bio = bio;
        bio->bi_rw = WRITE;
        bio->bi_bdev = log->rdev->bdev;
        bio->bi_iter.bi_sector = log->log_start;
        bio_add_page(bio, io->meta_page, PAGE_SIZE, 0);
        bio->bi_end_io = r5l_log_endio;
        bio->bi_private = io;

        bio_list_add(&io->bios, bio);
        atomic_inc(&io->pending_io);

        log->seq++;
        log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
        io->log_end = log->log_start;
        /* current bio hit disk end */
        if (log->log_start == 0)
                io->current_bio = NULL;

        spin_lock_irq(&log->io_list_lock);
        list_add_tail(&io->log_sibling, &log->running_ios);
        spin_unlock_irq(&log->io_list_lock);

        return io;
}
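
/*
 * Make sure log->current_io has at least payload_size bytes left in its meta
 * page; if not, submit the current io_unit and start a new one. This always
 * succeeds (the allocations use __GFP_NOFAIL), hence the unconditional 0
 * return.
 */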
static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
        struct r5l_io_unit *io;

        io = log->current_io;
        if (io && io->meta_offset + payload_size > PAGE_SIZE)
                r5l_submit_current_io(log);
        io = log->current_io;
        if (io)
                return 0;

        log->current_io = r5l_new_meta(log);
        return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
                                    sector_t location,
                                    u32 checksum1, u32 checksum2,
                                    bool checksum2_valid)
{
        struct r5l_io_unit *io = log->current_io;
        struct r5l_payload_data_parity *payload;

        payload = page_address(io->meta_page) + io->meta_offset;
        payload->header.type = cpu_to_le16(type);
        payload->header.flags = cpu_to_le16(0);
        payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
                                    (PAGE_SHIFT - 9));
        payload->location = cpu_to_le64(location);
        payload->checksum[0] = cpu_to_le32(checksum1);
        if (checksum2_valid)
                payload->checksum[1] = cpu_to_le32(checksum2);

        io->meta_offset += sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
        struct r5l_io_unit *io = log->current_io;

alloc_bio:
        if (!io->current_bio) {
                struct bio *bio;

                bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
                bio->bi_rw = WRITE;
                bio->bi_bdev = log->rdev->bdev;
                bio->bi_iter.bi_sector = log->log_start;
                bio->bi_end_io = r5l_log_endio;
                bio->bi_private = io;

                bio_list_add(&io->bios, bio);
                atomic_inc(&io->pending_io);
                io->current_bio = bio;
        }
        if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
                io->current_bio = NULL;
                goto alloc_bio;
        }
        log->log_start = r5l_ring_add(log, log->log_start,
                                      BLOCK_SECTORS);
        /* current bio hit disk end */
        if (log->log_start == 0)
                io->current_bio = NULL;

        io->log_end = log->log_start;
}

static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
                           int data_pages, int parity_pages)
{
        int i;
        int meta_size;
        struct r5l_io_unit *io;

        meta_size =
                ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
                 * data_pages) +
                sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * parity_pages;

        r5l_get_meta(log, meta_size);
        io = log->current_io;

        for (i = 0; i < sh->disks; i++) {
                if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
                        continue;
                if (i == sh->pd_idx || i == sh->qd_idx)
                        continue;
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
                                        raid5_compute_blocknr(sh, i, 0),
                                        sh->dev[i].log_checksum, 0, false);
                r5l_append_payload_page(log, sh->dev[i].page);
        }

        if (sh->qd_idx >= 0) {
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                        sh->sector, sh->dev[sh->pd_idx].log_checksum,
                                        sh->dev[sh->qd_idx].log_checksum, true);
                r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
                r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
        } else {
                r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
                                        sh->sector, sh->dev[sh->pd_idx].log_checksum,
                                        0, false);
                r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
        }

        list_add_tail(&sh->log_list, &io->stripe_list);
        atomic_inc(&io->pending_stripe);
        sh->log_io = io;
}

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
        int write_disks = 0;
        int data_pages, parity_pages;
        int meta_size;
        int reserve;
        int i;

        if (!log)
                return -EAGAIN;
        /* Don't support stripe batch */
        if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
            test_bit(STRIPE_SYNCING, &sh->state)) {
                /* the stripe is written to log, we start writing it to raid */
                clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
                return -EAGAIN;
        }

        for (i = 0; i < sh->disks; i++) {
                void *addr;

                if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
                        continue;
                write_disks++;
                /* checksum is already calculated in last run */
                if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
                        continue;
                addr = kmap_atomic(sh->dev[i].page);
                sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
                                                    addr, PAGE_SIZE);
                kunmap_atomic(addr);
        }
        parity_pages = 1 + !!(sh->qd_idx >= 0);
        data_pages = write_disks - parity_pages;

        meta_size =
                ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
                 * data_pages) +
                sizeof(struct r5l_payload_data_parity) +
                sizeof(__le32) * parity_pages;
        /* Doesn't work with very big raid array */
        if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
                return -EINVAL;

        set_bit(STRIPE_LOG_TRAPPED, &sh->state);
        atomic_inc(&sh->count);

        mutex_lock(&log->io_mutex);
        /* meta + data */
        reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
        if (r5l_has_free_space(log, reserve))
                r5l_log_stripe(log, sh, data_pages, parity_pages);
        else {
                spin_lock(&log->no_space_stripes_lock);
                list_add_tail(&sh->log_list, &log->no_space_stripes);
                spin_unlock(&log->no_space_stripes_lock);

                r5l_wake_reclaim(log, reserve);
        }
        mutex_unlock(&log->io_mutex);

        return 0;
}
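
/*
 * Example of the reservation in r5l_write_stripe() above: a full-stripe write
 * on a 6-device RAID6 has write_disks == 6 (4 data + P + Q), so reserve ==
 * (1 + 6) << (PAGE_SHIFT - 9) == 56 sectors: one 4k meta block plus six 4k
 * data/parity blocks.
 */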

void r5l_write_stripe_run(struct r5l_log *log)
{
        if (!log)
                return;
        mutex_lock(&log->io_mutex);
        r5l_submit_current_io(log);
        mutex_unlock(&log->io_mutex);
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
        struct stripe_head *sh;

        spin_lock(&log->no_space_stripes_lock);
        while (!list_empty(&log->no_space_stripes)) {
                sh = list_first_entry(&log->no_space_stripes,
                                      struct stripe_head, log_list);
                list_del_init(&sh->log_list);
                set_bit(STRIPE_HANDLE, &sh->state);
                raid5_release_stripe(sh);
        }
        spin_unlock(&log->no_space_stripes_lock);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
        struct r5l_io_unit *io;

        /* Don't support stripe batch */
        io = sh->log_io;
        if (!io)
                return;
        sh->log_io = NULL;

        if (atomic_dec_and_test(&io->pending_stripe))
                r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
}

static void r5l_log_flush_endio(struct bio *bio)
{
        struct r5l_log *log = container_of(bio, struct r5l_log,
                                           flush_bio);
        unsigned long flags;
        struct r5l_io_unit *io;
        struct stripe_head *sh;

        spin_lock_irqsave(&log->io_list_lock, flags);
        list_for_each_entry(io, &log->flushing_ios, log_sibling) {
                while (!list_empty(&io->stripe_list)) {
                        sh = list_first_entry(&io->stripe_list,
                                              struct stripe_head, log_list);
                        list_del_init(&sh->log_list);
                        set_bit(STRIPE_HANDLE, &sh->state);
                        raid5_release_stripe(sh);
                }
        }
        list_splice_tail_init(&log->flushing_ios, &log->flushed_ios);
        spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting dispatch IO to raid.
 * The log consists of io_units, each beginning with a meta block. There is
 * one situation we want to avoid: a broken meta in the middle of the log
 * means recovery cannot find the meta blocks toward the head of the log. So
 * if an operation requires a meta block near the head to be persistent in
 * the log, we must make sure the meta blocks before it are persistent in the
 * log too. A case is:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks. The stripe data/parity must be persistent in the log before we
 * do the write to the raid disks.
 *
 * The solution is that we strictly maintain io_unit list order. In this case,
 * we only write stripes of an io_unit to the raid disks once the io_unit is
 * the first one whose data/parity is in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
        bool do_flush;

        if (!log)
                return;

        spin_lock_irq(&log->io_list_lock);
        /* flush bio is running */
        if (!list_empty(&log->flushing_ios)) {
                spin_unlock_irq(&log->io_list_lock);
                return;
        }
        list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
        do_flush = !list_empty(&log->flushing_ios);
        spin_unlock_irq(&log->io_list_lock);

        if (!do_flush)
                return;
        bio_reset(&log->flush_bio);
        log->flush_bio.bi_bdev = log->rdev->bdev;
        log->flush_bio.bi_end_io = r5l_log_flush_endio;
        submit_bio(WRITE_FLUSH, &log->flush_bio);
}

static void r5l_kick_io_unit(struct r5l_log *log)
{
        md_wakeup_thread(log->rdev->mddev->thread);
        wait_event_lock_irq(log->iounit_wait, !list_empty(&log->stripe_end_ios),
                            log->io_list_lock);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);

static void r5l_do_reclaim(struct r5l_log *log)
{
        struct r5l_io_unit *io, *last;
        LIST_HEAD(list);
        sector_t free = 0;
        sector_t reclaim_target = xchg(&log->reclaim_target, 0);

        spin_lock_irq(&log->io_list_lock);
        /*
         * move the proper io_units to the reclaim list. We should not change
         * the order: reclaimable/unreclaimable io_units can be mixed in the
         * list, and we shouldn't reuse the space of an unreclaimable io_unit.
         */
        while (1) {
                struct list_head *target_list = NULL;

                while (!list_empty(&log->stripe_end_ios)) {
                        io = list_first_entry(&log->stripe_end_ios,
                                              struct r5l_io_unit, log_sibling);
                        list_move_tail(&io->log_sibling, &list);
                        free += r5l_ring_distance(log, io->log_start,
                                                  io->log_end);
                }

                if (free >= reclaim_target ||
                    (list_empty(&log->running_ios) &&
                     list_empty(&log->io_end_ios) &&
                     list_empty(&log->flushing_ios) &&
                     list_empty(&log->flushed_ios)))
                        break;

                /* Below waiting mostly happens when we shutdown the raid */
                if (!list_empty(&log->flushed_ios))
                        target_list = &log->flushed_ios;
                else if (!list_empty(&log->flushing_ios))
                        target_list = &log->flushing_ios;
                else if (!list_empty(&log->io_end_ios))
                        target_list = &log->io_end_ios;
                else if (!list_empty(&log->running_ios))
                        target_list = &log->running_ios;

                r5l_kick_io_unit(log);
        }
        spin_unlock_irq(&log->io_list_lock);

        if (list_empty(&list))
                return;

        /* super always points to the last valid meta */
        last = list_last_entry(&list, struct r5l_io_unit, log_sibling);
        /*
         * write_super will flush the cache of each raid disk. We must write
         * super here, because the log area might be reused soon and we don't
         * want to confuse recovery.
         */
        r5l_write_super(log, last->log_start);

        mutex_lock(&log->io_mutex);
        log->last_checkpoint = last->log_start;
        log->last_cp_seq = last->seq;
        mutex_unlock(&log->io_mutex);
        r5l_run_no_space_stripes(log);

        while (!list_empty(&list)) {
                io = list_first_entry(&list, struct r5l_io_unit, log_sibling);
                list_del(&io->log_sibling);
                r5l_free_io_unit(log, io);
        }
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
        struct mddev *mddev = thread->mddev;
        struct r5conf *conf = mddev->private;
        struct r5l_log *log = conf->log;

        if (!log)
                return;
        r5l_do_reclaim(log);
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
        unsigned long target;
        unsigned long new = (unsigned long)space; /* overflow in theory */

        do {
                target = log->reclaim_target;
                if (new < target)
                        return;
        } while (cmpxchg(&log->reclaim_target, target, new) != target);
        md_wakeup_thread(log->reclaim_thread);
}

struct r5l_recovery_ctx {
        struct page *meta_page;         /* current meta */
        sector_t meta_total_blocks;     /* total size of current meta and data */
        sector_t pos;                   /* recovery position */
        u64 seq;                        /* recovery position seq */
};
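
/*
 * Recovery walks the log from the last checkpoint: read the meta block at
 * ctx->pos, verify its magic/version/seq/position/crc, replay the data and
 * parity pages it describes onto the raid disks, then advance pos by
 * meta_total_blocks and seq by one, until an invalid meta block ends the
 * scan.
 */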
static int r5l_read_meta_block(struct r5l_log *log,
                               struct r5l_recovery_ctx *ctx)
{
        struct page *page = ctx->meta_page;
        struct r5l_meta_block *mb;
        u32 crc, stored_crc;

        if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
                return -EIO;

        mb = page_address(page);
        stored_crc = le32_to_cpu(mb->checksum);
        mb->checksum = 0;

        if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
            le64_to_cpu(mb->seq) != ctx->seq ||
            mb->version != R5LOG_VERSION ||
            le64_to_cpu(mb->position) != ctx->pos)
                return -EINVAL;

        crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        if (stored_crc != crc)
                return -EINVAL;

        if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
                return -EINVAL;

        ctx->meta_total_blocks = BLOCK_SECTORS;

        return 0;
}

static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
                                         struct r5l_recovery_ctx *ctx,
                                         sector_t stripe_sect,
                                         int *offset, sector_t *log_offset)
{
        struct r5conf *conf = log->rdev->mddev->private;
        struct stripe_head *sh;
        struct r5l_payload_data_parity *payload;
        int disk_index;

        sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
        while (1) {
                payload = page_address(ctx->meta_page) + *offset;

                if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
                        raid5_compute_sector(conf,
                                             le64_to_cpu(payload->location), 0,
                                             &disk_index, sh);

                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
                                     sh->dev[disk_index].page, READ, false);
                        sh->dev[disk_index].log_checksum =
                                le32_to_cpu(payload->checksum[0]);
                        set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
                        ctx->meta_total_blocks += BLOCK_SECTORS;
                } else {
                        disk_index = sh->pd_idx;
                        sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
                                     sh->dev[disk_index].page, READ, false);
                        sh->dev[disk_index].log_checksum =
                                le32_to_cpu(payload->checksum[0]);
                        set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

                        if (sh->qd_idx >= 0) {
                                disk_index = sh->qd_idx;
                                sync_page_io(log->rdev,
                                             r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
                                             PAGE_SIZE, sh->dev[disk_index].page,
                                             READ, false);
                                sh->dev[disk_index].log_checksum =
                                        le32_to_cpu(payload->checksum[1]);
                                set_bit(R5_Wantwrite,
                                        &sh->dev[disk_index].flags);
                        }
                        ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
                }

                *log_offset = r5l_ring_add(log, *log_offset,
                                           le32_to_cpu(payload->size));
                *offset += sizeof(struct r5l_payload_data_parity) +
                        sizeof(__le32) *
                        (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
                if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
                        break;
        }

        for (disk_index = 0; disk_index < sh->disks; disk_index++) {
                void *addr;
                u32 checksum;

                if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
                        continue;
                addr = kmap_atomic(sh->dev[disk_index].page);
                checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
                kunmap_atomic(addr);
                if (checksum != sh->dev[disk_index].log_checksum)
                        goto error;
        }

        for (disk_index = 0; disk_index < sh->disks; disk_index++) {
                struct md_rdev *rdev, *rrdev;

                if (!test_and_clear_bit(R5_Wantwrite,
                                        &sh->dev[disk_index].flags))
                        continue;

                /* in case device is broken */
                rdev = rcu_dereference(conf->disks[disk_index].rdev);
                if (rdev)
                        sync_page_io(rdev, stripe_sect, PAGE_SIZE,
                                     sh->dev[disk_index].page, WRITE, false);
                rrdev = rcu_dereference(conf->disks[disk_index].replacement);
                if (rrdev)
                        sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
                                     sh->dev[disk_index].page, WRITE, false);
        }
        raid5_release_stripe(sh);
        return 0;

error:
        for (disk_index = 0; disk_index < sh->disks; disk_index++)
                sh->dev[disk_index].flags = 0;
        raid5_release_stripe(sh);
        return -EINVAL;
}

static int r5l_recovery_flush_one_meta(struct r5l_log *log,
                                       struct r5l_recovery_ctx *ctx)
{
        struct r5conf *conf = log->rdev->mddev->private;
        struct r5l_payload_data_parity *payload;
        struct r5l_meta_block *mb;
        int offset;
        sector_t log_offset;
        sector_t stripe_sector;

        mb = page_address(ctx->meta_page);
        offset = sizeof(struct r5l_meta_block);
        log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

        while (offset < le32_to_cpu(mb->meta_size)) {
                int dd;

                payload = (void *)mb + offset;
                stripe_sector = raid5_compute_sector(conf,
                                                     le64_to_cpu(payload->location), 0, &dd, NULL);
                if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
                                                  &offset, &log_offset))
                        return -EINVAL;
        }
        return 0;
}

/* copy data/parity from log to raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
                                   struct r5l_recovery_ctx *ctx)
{
        while (1) {
                if (r5l_read_meta_block(log, ctx))
                        return;
                if (r5l_recovery_flush_one_meta(log, ctx))
                        return;
                ctx->seq++;
                ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
        }
}

static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
                                          u64 seq)
{
        struct page *page;
        struct r5l_meta_block *mb;
        u32 crc;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return -ENOMEM;
        mb = page_address(page);
        mb->magic = cpu_to_le32(R5LOG_MAGIC);
        mb->version = R5LOG_VERSION;
        mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
        mb->seq = cpu_to_le64(seq);
        mb->position = cpu_to_le64(pos);
        crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        mb->checksum = cpu_to_le32(crc);

        if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
                __free_page(page);
                return -EIO;
        }
        __free_page(page);
        return 0;
}

static int r5l_recovery_log(struct r5l_log *log)
{
        struct r5l_recovery_ctx ctx;

        ctx.pos = log->last_checkpoint;
        ctx.seq = log->last_cp_seq;
        ctx.meta_page = alloc_page(GFP_KERNEL);
        if (!ctx.meta_page)
                return -ENOMEM;

        r5l_recovery_flush_log(log, &ctx);
        __free_page(ctx.meta_page);

        /*
         * we did a recovery. Now ctx.pos points to an invalid meta block. The
         * new log will start here, but we can't let the superblock point to
         * the last valid meta block. The log might look like:
         * | meta 1| meta 2| meta 3|
         * meta 1 is valid, meta 2 is invalid, and meta 3 could still be valid.
         * If the superblock points to meta 1 and we write a new valid meta 2n
         * there, then if a crash happens again the new recovery will start
         * from meta 1. Since meta 2n is valid now, recovery will think meta 3
         * is valid too, which is wrong.
         * The solution is to create a new meta in place of meta 2 with its
         * seq == meta 1's seq + 10 and let the superblock point to it. The
         * same recovery will not consider meta 3 a valid meta, because its
         * seq doesn't match.
         */
        if (ctx.seq > log->last_cp_seq + 1) {
                int ret;

                ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
                if (ret)
                        return ret;
                log->seq = ctx.seq + 11;
                log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
                r5l_write_super(log, ctx.pos);
        } else {
                log->log_start = ctx.pos;
                log->seq = ctx.seq;
        }
        return 0;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
        struct mddev *mddev = log->rdev->mddev;

        log->rdev->journal_tail = cp;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
}

static int r5l_load_log(struct r5l_log *log)
{
        struct md_rdev *rdev = log->rdev;
        struct page *page;
        struct r5l_meta_block *mb;
        sector_t cp = log->rdev->journal_tail;
        u32 stored_crc, expected_crc;
        bool create_super = false;
        int ret;

        /* Make sure it's valid */
        if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
                cp = 0;
        page = alloc_page(GFP_KERNEL);
        if (!page)
                return -ENOMEM;

        if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
                ret = -EIO;
                goto ioerr;
        }
        mb = page_address(page);

        if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
            mb->version != R5LOG_VERSION) {
                create_super = true;
                goto create;
        }
        stored_crc = le32_to_cpu(mb->checksum);
        mb->checksum = 0;
        expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
        if (stored_crc != expected_crc) {
                create_super = true;
                goto create;
        }
        if (le64_to_cpu(mb->position) != cp) {
                create_super = true;
                goto create;
        }
create:
        if (create_super) {
                log->last_cp_seq = prandom_u32();
                cp = 0;
                /*
                 * Make sure super points to the correct address. The log
                 * might get data very soon; if super doesn't have the correct
                 * log tail address, recovery can't find the log.
                 */
                r5l_write_super(log, cp);
        } else
                log->last_cp_seq = le64_to_cpu(mb->seq);

        log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
        log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
        if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
                log->max_free_space = RECLAIM_MAX_FREE_SPACE;
        log->last_checkpoint = cp;

        __free_page(page);

        return r5l_recovery_log(log);
ioerr:
        __free_page(page);
        return ret;
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
        struct r5l_log *log;

        if (PAGE_SIZE != 4096)
                return -EINVAL;
        log = kzalloc(sizeof(*log), GFP_KERNEL);
        if (!log)
                return -ENOMEM;
        log->rdev = rdev;

        log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
                                       sizeof(rdev->mddev->uuid));

        mutex_init(&log->io_mutex);

        spin_lock_init(&log->io_list_lock);
        INIT_LIST_HEAD(&log->running_ios);
        INIT_LIST_HEAD(&log->io_end_ios);
        INIT_LIST_HEAD(&log->stripe_end_ios);
        INIT_LIST_HEAD(&log->flushing_ios);
        INIT_LIST_HEAD(&log->flushed_ios);
        bio_init(&log->flush_bio);

        log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
        if (!log->io_kc)
                goto io_kc;

        log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
                                                 log->rdev->mddev, "reclaim");
        if (!log->reclaim_thread)
                goto reclaim_thread;
        init_waitqueue_head(&log->iounit_wait);

        INIT_LIST_HEAD(&log->no_space_stripes);
        spin_lock_init(&log->no_space_stripes_lock);

        if (r5l_load_log(log))
                goto error;

        conf->log = log;
        return 0;
error:
        md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
        kmem_cache_destroy(log->io_kc);
io_kc:
        kfree(log);
        return -EINVAL;
}
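
/*
 * Usage note (assumption, call sites not shown here): r5l_init_log() is
 * expected to be called from raid5 array setup in raid5.c once a journal
 * rdev has been identified, and r5l_exit_log() below is its counterpart on
 * array shutdown.
 */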
void r5l_exit_log(struct r5l_log *log)
{
        /*
         * at this point all stripes are finished, so every io_unit is at
         * least in STRIPE_END state
         */
        r5l_wake_reclaim(log, -1L);
        md_unregister_thread(&log->reclaim_thread);
        r5l_do_reclaim(log);
        /*
         * force a super update: r5l_do_reclaim() might have updated the
         * super, and mddev->thread is already stopped
         */
        md_update_sb(log->rdev->mddev, 1);

        kmem_cache_destroy(log->io_kc);
        kfree(log);
}