raid5-cache.c

/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include "md.h"
#include "raid5.h"

/*
 * metadata/data is stored on disk in 4k units (a block) regardless of the
 * underlying hardware sector size. Only works with PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)

/*
 * Reclaim is triggered once reclaimable space reaches 1/4 of the device size,
 * capped at 10G. This prevents recovery from having to scan a very long log.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, round to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim run if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head flushed_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;
	struct list_head stripe_end_ios;/* io_units which have been completely
					 * written to the RAID but have not yet
					 * been considered for updating super */

	struct kmem_cache *io_kc;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed. if it's 0, reclaim spaces
					 * used by io_units which are in
					 * IO_UNIT_STRIPE_END state (eg, reclaim
					 * doesn't wait for a specific io_unit
					 * to switch to IO_UNIT_STRIPE_END
					 * state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;
};

/*
 * an IO range starts from a meta data block and ends at the next meta data
 * block. The io unit's meta data block tracks the data/parity that follows
 * it. The io unit is written to the log disk with a normal write; as we
 * always flush the log disk first and only then start moving data to the
 * raid disks, there is no requirement to write the io unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio_list bios;
	atomic_t pending_io;	/* pending bios not written to log yet */
	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio start writing to log,
				 * doesn't accept new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finish writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripes data finished writing to raid */
};

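/*
 * The log device is treated as a circular buffer addressed in sectors:
 * r5l_ring_add() advances a position by 'inc' sectors and wraps at
 * log->device_size, and r5l_ring_distance() returns how many sectors lie
 * between two positions walking forward around the ring. r5l_has_free_space()
 * uses the distance from the log tail (last_checkpoint) to the log head
 * (log_start) to decide whether 'size' more sectors can be appended.
 */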
static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	/* We can't handle memory allocation failures so far */
	gfp_t gfp = GFP_NOIO | __GFP_NOFAIL;

	io = kmem_cache_zalloc(log->io_kc, gfp);
	io->log = log;
	io->meta_page = alloc_page(gfp | __GFP_ZERO);

	bio_list_init(&io->bios);
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	io->state = IO_UNIT_RUNNING;
	return io;
}

static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
{
	__free_page(io->meta_page);
	kmem_cache_free(log->io_kc, io);
}

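/*
 * Move io_units that have reached at least 'state' from the head of 'from'
 * to the tail of 'to'. io_units advance through their states in list order,
 * so the walk stops at the first entry that is still behind; this keeps both
 * lists ordered by log position.
 */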
static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
				  enum r5l_io_unit_state state)
{
	struct r5l_io_unit *io;

	while (!list_empty(from)) {
		io = list_first_entry(from, struct r5l_io_unit, log_sibling);
		/* don't change list order */
		if (io->state >= state)
			list_move_tail(&io->log_sibling, to);
		else
			break;
	}
}

/*
 * We don't want too many io_units residing in the stripe_end_ios list, which
 * would waste a lot of memory. So we try to remove some, but we must keep at
 * least 2 io_units: the superblock must point to a valid meta block, and if
 * that is the last meta block, recovery has less to scan.
 */
static void r5l_compress_stripe_end_list(struct r5l_log *log)
{
	struct r5l_io_unit *first, *last, *io;

	first = list_first_entry(&log->stripe_end_ios,
				 struct r5l_io_unit, log_sibling);
	last = list_last_entry(&log->stripe_end_ios,
			       struct r5l_io_unit, log_sibling);
	if (first == last)
		return;
	list_del(&first->log_sibling);
	list_del(&last->log_sibling);
	while (!list_empty(&log->stripe_end_ios)) {
		io = list_first_entry(&log->stripe_end_ios,
				      struct r5l_io_unit, log_sibling);
		list_del(&io->log_sibling);
		first->log_end = io->log_end;
		r5l_free_io_unit(log, io);
	}
	list_add_tail(&first->log_sibling, &log->stripe_end_ios);
	list_add_tail(&last->log_sibling, &log->stripe_end_ios);
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

/* XXX: totally ignores I/O errors */
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_log *log = io->log;
	unsigned long flags;

	bio_put(bio);

	if (!atomic_dec_and_test(&io->pending_io))
		return;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);
	r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
			      IO_UNIT_IO_END);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	md_wakeup_thread(log->rdev->mddev->thread);
}

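/*
 * Close the current meta block: record how much of the meta page was used,
 * checksum the page with the array UUID checksum as seed, and submit every
 * bio queued on the io_unit. All bios are offset by rdev->data_offset before
 * submission.
 */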
static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_meta_block *block;
	struct bio *bio;
	unsigned long flags;
	u32 crc;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	while ((bio = bio_list_pop(&io->bios))) {
		/* all IO must start from rdev->data_offset */
		bio->bi_iter.bi_sector += log->rdev->data_offset;
		submit_bio(WRITE, bio);
	}
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;
	struct bio *bio;

	io = r5l_alloc_io_unit(log);

	block = page_address(io->meta_page);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq;

	bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
	io->current_bio = bio;
	bio->bi_rw = WRITE;
	bio->bi_bdev = log->rdev->bdev;
	bio->bi_iter.bi_sector = log->log_start;
	bio_add_page(bio, io->meta_page, PAGE_SIZE, 0);
	bio->bi_end_io = r5l_log_endio;
	bio->bi_private = io;

	bio_list_add(&io->bios, bio);
	atomic_inc(&io->pending_io);

	log->seq++;
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
	io->log_end = log->log_start;
	/* current bio hit disk end */
	if (log->log_start == 0)
		io->current_bio = NULL;

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

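/*
 * Make sure log->current_io has room for a payload descriptor of
 * 'payload_size' bytes in its meta page. If the current meta block is full,
 * submit it and start a new one.
 */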
static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	struct r5l_io_unit *io;

	io = log->current_io;
	if (io && io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);
	io = log->current_io;
	if (io)
		return 0;

	log->current_io = r5l_new_meta(log);
	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

alloc_bio:
	if (!io->current_bio) {
		struct bio *bio;

		bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
		bio->bi_rw = WRITE;
		bio->bi_bdev = log->rdev->bdev;
		bio->bi_iter.bi_sector = log->log_start;
		bio->bi_end_io = r5l_log_endio;
		bio->bi_private = io;

		bio_list_add(&io->bios, bio);
		atomic_inc(&io->pending_io);
		io->current_bio = bio;
	}
	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
		io->current_bio = NULL;
		goto alloc_bio;
	}
	log->log_start = r5l_ring_add(log, log->log_start,
				      BLOCK_SECTORS);
	/* current bio hit disk end */
	if (log->log_start == 0)
		io->current_bio = NULL;

	io->log_end = log->log_start;
}

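/*
 * Append one stripe to the current io_unit: a payload descriptor plus the
 * page for each data block being written, followed by a single parity
 * descriptor covering P (and Q for RAID6) and the parity page(s). meta_size
 * is the room all descriptors need in the meta block; the caller has already
 * verified that the stripe fits in the log.
 */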
static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
			   int data_pages, int parity_pages)
{
	int i;
	int meta_size;
	struct r5l_io_unit *io;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;

	r5l_get_meta(log, meta_size);
	io = log->current_io;

	for (i = 0; i < sh->disks; i++) {
		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		if (i == sh->pd_idx || i == sh->qd_idx)
			continue;
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
					raid5_compute_blocknr(sh, i, 0),
					sh->dev[i].log_checksum, 0, false);
		r5l_append_payload_page(log, sh->dev[i].page);
	}

	if (sh->qd_idx >= 0) {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					sh->dev[sh->qd_idx].log_checksum, true);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
		r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
	} else {
		r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
					sh->sector, sh->dev[sh->pd_idx].log_checksum,
					0, false);
		r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
	}

	list_add_tail(&sh->log_list, &io->stripe_list);
	atomic_inc(&io->pending_stripe);
	sh->log_io = io;
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/*
 * running in raid5d, where reclaim could wait for raid5d too (when it flushes
 * data from log to raid disks), so we shouldn't wait for reclaim here
 */
int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
{
	int write_disks = 0;
	int data_pages, parity_pages;
	int meta_size;
	int reserve;
	int i;

	if (!log)
		return -EAGAIN;
	/* Don't support stripe batch */
	if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
	    test_bit(STRIPE_SYNCING, &sh->state)) {
		/* the stripe is written to log, we start writing it to raid */
		clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
		return -EAGAIN;
	}

	for (i = 0; i < sh->disks; i++) {
		void *addr;

		if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
			continue;
		write_disks++;
		/* checksum is already calculated in the last run */
		if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
			continue;
		addr = kmap_atomic(sh->dev[i].page);
		sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
						    addr, PAGE_SIZE);
		kunmap_atomic(addr);
	}
	parity_pages = 1 + !!(sh->qd_idx >= 0);
	data_pages = write_disks - parity_pages;

	meta_size =
		((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
		 * data_pages) +
		sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * parity_pages;
	/* Doesn't work with very big raid arrays */
	if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
		return -EINVAL;

	set_bit(STRIPE_LOG_TRAPPED, &sh->state);
	/*
	 * The stripe must enter state machine again to finish the write, so
	 * don't delay.
	 */
	clear_bit(STRIPE_DELAYED, &sh->state);
	atomic_inc(&sh->count);

	mutex_lock(&log->io_mutex);
	/* meta + data */
	reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
	if (r5l_has_free_space(log, reserve))
		r5l_log_stripe(log, sh, data_pages, parity_pages);
	else {
		spin_lock(&log->no_space_stripes_lock);
		list_add_tail(&sh->log_list, &log->no_space_stripes);
		spin_unlock(&log->no_space_stripes_lock);

		r5l_wake_reclaim(log, reserve);
	}
	mutex_unlock(&log->io_mutex);

	return 0;
}

void r5l_write_stripe_run(struct r5l_log *log)
{
	if (!log)
		return;
	mutex_lock(&log->io_mutex);
	r5l_submit_current_io(log);
	mutex_unlock(&log->io_mutex);
}

int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio)
{
	if (!log)
		return -ENODEV;
	/*
	 * we flush the log disk cache first, then write stripe data to the
	 * raid disks. So if the bio is finished, the log disk cache is
	 * already flushed. Recovery guarantees we can recover the bio from
	 * the log disk, so we don't need to flush again.
	 */
	if (bio->bi_iter.bi_size == 0) {
		bio_endio(bio);
		return 0;
	}
	bio->bi_rw &= ~REQ_FLUSH;
	return -EAGAIN;
}

/* This will run after log space is reclaimed */
static void r5l_run_no_space_stripes(struct r5l_log *log)
{
	struct stripe_head *sh;

	spin_lock(&log->no_space_stripes_lock);
	while (!list_empty(&log->no_space_stripes)) {
		sh = list_first_entry(&log->no_space_stripes,
				      struct stripe_head, log_list);
		list_del_init(&sh->log_list);
		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
	spin_unlock(&log->no_space_stripes_lock);
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io)
{
	struct r5l_log *log = io->log;
	struct r5l_io_unit *last;
	sector_t reclaimable_space;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
	/* might move 0 entries */
	r5l_move_io_unit_list(&log->flushed_ios, &log->stripe_end_ios,
			      IO_UNIT_STRIPE_END);
	if (list_empty(&log->stripe_end_ios)) {
		spin_unlock_irqrestore(&log->io_list_lock, flags);
		return;
	}

	last = list_last_entry(&log->stripe_end_ios,
			       struct r5l_io_unit, log_sibling);
	reclaimable_space = r5l_ring_distance(log, log->last_checkpoint,
					      last->log_end);
	if (reclaimable_space >= log->max_free_space)
		r5l_wake_reclaim(log, 0);

	r5l_compress_stripe_end_list(log);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	wake_up(&log->iounit_wait);
}

void r5l_stripe_write_finished(struct stripe_head *sh)
{
	struct r5l_io_unit *io;

	io = sh->log_io;
	sh->log_io = NULL;

	if (io && atomic_dec_and_test(&io->pending_stripe))
		__r5l_stripe_write_finished(io);
}

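/*
 * Called when the explicit cache flush of the log device completes. At this
 * point everything in the flushing io_units is stable on the log media, so
 * their stripes can be handed back to the state machine to be written to the
 * raid disks, and the io_units themselves move to the flushed_ios list.
 */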
static void r5l_log_flush_endio(struct bio *bio)
{
	struct r5l_log *log = container_of(bio, struct r5l_log,
		flush_bio);
	unsigned long flags;
	struct r5l_io_unit *io;
	struct stripe_head *sh;

	spin_lock_irqsave(&log->io_list_lock, flags);
	list_for_each_entry(io, &log->flushing_ios, log_sibling) {
		while (!list_empty(&io->stripe_list)) {
			sh = list_first_entry(&io->stripe_list,
				struct stripe_head, log_list);
			list_del_init(&sh->log_list);
			set_bit(STRIPE_HANDLE, &sh->state);
			raid5_release_stripe(sh);
		}
	}
	list_splice_tail_init(&log->flushing_ios, &log->flushed_ios);
	spin_unlock_irqrestore(&log->io_list_lock, flags);
}

/*
 * Starting to dispatch IO to raid.
 * The log consists of a sequence of io_units (meta blocks plus their data).
 * One situation we want to avoid is a broken meta block in the middle of the
 * log, which would stop recovery from finding any meta block beyond it. So if
 * an operation requires a meta block to be persistent in the log, every meta
 * block before it must be persistent as well. A concrete case:
 *
 * stripe data/parity is in the log and we start writing the stripe to the
 * raid disks. The stripe data/parity must be persistent in the log before we
 * do the write to the raid disks.
 *
 * The solution is to strictly maintain io_unit list order: we only write the
 * stripes of an io_unit to the raid disks once it and every io_unit before it
 * have their data/parity in the log.
 */
void r5l_flush_stripe_to_raid(struct r5l_log *log)
{
	bool do_flush;

	if (!log)
		return;

	spin_lock_irq(&log->io_list_lock);
	/* flush bio is running */
	if (!list_empty(&log->flushing_ios)) {
		spin_unlock_irq(&log->io_list_lock);
		return;
	}
	list_splice_tail_init(&log->io_end_ios, &log->flushing_ios);
	do_flush = !list_empty(&log->flushing_ios);
	spin_unlock_irq(&log->io_list_lock);

	if (!do_flush)
		return;
	bio_reset(&log->flush_bio);
	log->flush_bio.bi_bdev = log->rdev->bdev;
	log->flush_bio.bi_end_io = r5l_log_flush_endio;
	submit_bio(WRITE_FLUSH, &log->flush_bio);
}

static void r5l_kick_io_unit(struct r5l_log *log)
{
	md_wakeup_thread(log->rdev->mddev->thread);
	wait_event_lock_irq(log->iounit_wait, !list_empty(&log->stripe_end_ios),
			    log->io_list_lock);
}

static void r5l_write_super(struct r5l_log *log, sector_t cp);

static void r5l_do_reclaim(struct r5l_log *log)
{
	struct r5l_io_unit *io, *last;
	LIST_HEAD(list);
	sector_t free = 0;
	sector_t reclaim_target = xchg(&log->reclaim_target, 0);

	spin_lock_irq(&log->io_list_lock);
	/*
	 * move the proper io_units to the reclaim list. We should not change
	 * the order: reclaimable and unreclaimable io_units can be mixed in
	 * the list, and we must not reuse the space of an unreclaimable
	 * io_unit.
	 */
	while (1) {
		struct list_head *target_list = NULL;

		while (!list_empty(&log->stripe_end_ios)) {
			io = list_first_entry(&log->stripe_end_ios,
					      struct r5l_io_unit, log_sibling);
			list_move_tail(&io->log_sibling, &list);
			free += r5l_ring_distance(log, io->log_start,
						  io->log_end);
		}

		if (free >= reclaim_target ||
		    (list_empty(&log->running_ios) &&
		     list_empty(&log->io_end_ios) &&
		     list_empty(&log->flushing_ios) &&
		     list_empty(&log->flushed_ios)))
			break;

		/* Below waiting mostly happens when we shut down the raid */
		if (!list_empty(&log->flushed_ios))
			target_list = &log->flushed_ios;
		else if (!list_empty(&log->flushing_ios))
			target_list = &log->flushing_ios;
		else if (!list_empty(&log->io_end_ios))
			target_list = &log->io_end_ios;
		else if (!list_empty(&log->running_ios))
			target_list = &log->running_ios;

		r5l_kick_io_unit(log);
	}
	spin_unlock_irq(&log->io_list_lock);

	if (list_empty(&list))
		return;

	/* super always points to the last valid meta */
	last = list_last_entry(&list, struct r5l_io_unit, log_sibling);
	/*
	 * write_super will flush the cache of each raid disk. We must write
	 * super here, because the log area might be reused soon and we don't
	 * want to confuse recovery.
	 */
	r5l_write_super(log, last->log_start);

	mutex_lock(&log->io_mutex);
	log->last_checkpoint = last->log_start;
	log->last_cp_seq = last->seq;
	mutex_unlock(&log->io_mutex);
	r5l_run_no_space_stripes(log);

	while (!list_empty(&list)) {
		io = list_first_entry(&list, struct r5l_io_unit, log_sibling);
		list_del(&io->log_sibling);
		r5l_free_io_unit(log, io);
	}
}

static void r5l_reclaim_thread(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_log *log = conf->log;

	if (!log)
		return;
	r5l_do_reclaim(log);
}

static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
{
	unsigned long target;
	unsigned long new = (unsigned long)space; /* overflow in theory */

	do {
		target = log->reclaim_target;
		if (new < target)
			return;
	} while (cmpxchg(&log->reclaim_target, target, new) != target);
	md_wakeup_thread(log->reclaim_thread);
}

struct r5l_recovery_ctx {
	struct page *meta_page;		/* current meta */
	sector_t meta_total_blocks;	/* total size of current meta and data */
	sector_t pos;			/* recovery position */
	u64 seq;			/* recovery position seq */
};

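/*
 * Read the meta block at ctx->pos and validate it: the magic, version,
 * sequence number and on-disk position must all match what recovery expects,
 * and the crc32c over the whole page (seeded with the array UUID checksum)
 * must agree with the stored checksum. Any mismatch ends the log scan.
 */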
static int r5l_read_meta_block(struct r5l_log *log,
			       struct r5l_recovery_ctx *ctx)
{
	struct page *page = ctx->meta_page;
	struct r5l_meta_block *mb;
	u32 crc, stored_crc;

	if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
		return -EIO;

	mb = page_address(page);
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    le64_to_cpu(mb->seq) != ctx->seq ||
	    mb->version != R5LOG_VERSION ||
	    le64_to_cpu(mb->position) != ctx->pos)
		return -EINVAL;

	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != crc)
		return -EINVAL;

	if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
		return -EINVAL;

	ctx->meta_total_blocks = BLOCK_SECTORS;

	return 0;
}

static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
					 struct r5l_recovery_ctx *ctx,
					 sector_t stripe_sect,
					 int *offset, sector_t *log_offset)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct stripe_head *sh;
	struct r5l_payload_data_parity *payload;
	int disk_index;

	sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
	while (1) {
		payload = page_address(ctx->meta_page) + *offset;

		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
			raid5_compute_sector(conf,
					     le64_to_cpu(payload->location), 0,
					     &disk_index, sh);

			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
			ctx->meta_total_blocks += BLOCK_SECTORS;
		} else {
			disk_index = sh->pd_idx;
			sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
				     sh->dev[disk_index].page, READ, false);
			sh->dev[disk_index].log_checksum =
				le32_to_cpu(payload->checksum[0]);
			set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);

			if (sh->qd_idx >= 0) {
				disk_index = sh->qd_idx;
				sync_page_io(log->rdev,
					     r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
					     PAGE_SIZE, sh->dev[disk_index].page,
					     READ, false);
				sh->dev[disk_index].log_checksum =
					le32_to_cpu(payload->checksum[1]);
				set_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags);
			}
			ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
		}

		*log_offset = r5l_ring_add(log, *log_offset,
					   le32_to_cpu(payload->size));
		*offset += sizeof(struct r5l_payload_data_parity) +
			sizeof(__le32) *
			(le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
		if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
			break;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		void *addr;
		u32 checksum;

		if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
			continue;
		addr = kmap_atomic(sh->dev[disk_index].page);
		checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
		kunmap_atomic(addr);
		if (checksum != sh->dev[disk_index].log_checksum)
			goto error;
	}

	for (disk_index = 0; disk_index < sh->disks; disk_index++) {
		struct md_rdev *rdev, *rrdev;

		if (!test_and_clear_bit(R5_Wantwrite,
					&sh->dev[disk_index].flags))
			continue;

		/* in case device is broken */
		rdev = rcu_dereference(conf->disks[disk_index].rdev);
		if (rdev)
			sync_page_io(rdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
		rrdev = rcu_dereference(conf->disks[disk_index].replacement);
		if (rrdev)
			sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
				     sh->dev[disk_index].page, WRITE, false);
	}
	raid5_release_stripe(sh);
	return 0;

error:
	for (disk_index = 0; disk_index < sh->disks; disk_index++)
		sh->dev[disk_index].flags = 0;
	raid5_release_stripe(sh);
	return -EINVAL;
}

static int r5l_recovery_flush_one_meta(struct r5l_log *log,
				       struct r5l_recovery_ctx *ctx)
{
	struct r5conf *conf = log->rdev->mddev->private;
	struct r5l_payload_data_parity *payload;
	struct r5l_meta_block *mb;
	int offset;
	sector_t log_offset;
	sector_t stripe_sector;

	mb = page_address(ctx->meta_page);
	offset = sizeof(struct r5l_meta_block);
	log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);

	while (offset < le32_to_cpu(mb->meta_size)) {
		int dd;

		payload = (void *)mb + offset;
		stripe_sector = raid5_compute_sector(conf,
						     le64_to_cpu(payload->location), 0, &dd, NULL);
		if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
						  &offset, &log_offset))
			return -EINVAL;
	}
	return 0;
}

/* copy data/parity from log to raid disks */
static void r5l_recovery_flush_log(struct r5l_log *log,
				   struct r5l_recovery_ctx *ctx)
{
	while (1) {
		if (r5l_read_meta_block(log, ctx))
			return;
		if (r5l_recovery_flush_one_meta(log, ctx))
			return;
		ctx->seq++;
		ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
	}
}

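/*
 * Write a meta block that carries no payload at 'pos' with sequence 'seq',
 * using WRITE_FUA so it is on the media before the superblock points at it.
 * Recovery uses this to terminate the log cleanly after a crash (see the
 * comment in r5l_recovery_log() below).
 */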
static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
					  u64 seq)
{
	struct page *page;
	struct r5l_meta_block *mb;
	u32 crc;

	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	mb = page_address(page);
	mb->magic = cpu_to_le32(R5LOG_MAGIC);
	mb->version = R5LOG_VERSION;
	mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
	mb->seq = cpu_to_le64(seq);
	mb->position = cpu_to_le64(pos);
	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	mb->checksum = cpu_to_le32(crc);

	if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
		__free_page(page);
		return -EIO;
	}
	__free_page(page);
	return 0;
}

static int r5l_recovery_log(struct r5l_log *log)
{
	struct r5l_recovery_ctx ctx;

	ctx.pos = log->last_checkpoint;
	ctx.seq = log->last_cp_seq;
	ctx.meta_page = alloc_page(GFP_KERNEL);
	if (!ctx.meta_page)
		return -ENOMEM;

	r5l_recovery_flush_log(log, &ctx);
	__free_page(ctx.meta_page);

	/*
	 * we did a recovery. Now ctx.pos points to an invalid meta block. The
	 * new log will start here, but we can't let the superblock keep
	 * pointing at the last valid meta block. The log might look like:
	 * | meta 1| meta 2| meta 3|
	 * meta 1 is valid, meta 2 is invalid and meta 3 could still look
	 * valid. If the superblock keeps pointing to meta 1 and we later
	 * write a new valid meta 2n at meta 2's position, then if a crash
	 * happens again, the next recovery starts from meta 1. Since meta 2n
	 * is valid now, that recovery would think meta 3 is valid too, which
	 * is wrong.
	 * The solution is to create a new meta block in place of meta 2 with
	 * its seq == meta 1's seq + 10 and let the superblock point to it.
	 * Recovery will then reject meta 3 because its seq doesn't match.
	 */
	if (ctx.seq > log->last_cp_seq + 1) {
		int ret;

		ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
		if (ret)
			return ret;
		log->seq = ctx.seq + 11;
		log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
		r5l_write_super(log, ctx.pos);
	} else {
		log->log_start = ctx.pos;
		log->seq = ctx.seq;
	}
	return 0;
}

static void r5l_write_super(struct r5l_log *log, sector_t cp)
{
	struct mddev *mddev = log->rdev->mddev;

	log->rdev->journal_tail = cp;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);
}

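/*
 * Load the log at array start: read the meta block the superblock's
 * journal_tail points at and validate its magic, version, checksum and
 * position. If anything is wrong (including a journal_tail that is out of
 * range or unaligned), start a fresh log at sector 0 with a random sequence
 * number; otherwise remember the checkpoint and replay the log via
 * r5l_recovery_log().
 */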
static int r5l_load_log(struct r5l_log *log)
{
	struct md_rdev *rdev = log->rdev;
	struct page *page;
	struct r5l_meta_block *mb;
	sector_t cp = log->rdev->journal_tail;
	u32 stored_crc, expected_crc;
	bool create_super = false;
	int ret;

	/* Make sure it's valid */
	if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
		cp = 0;
	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
		ret = -EIO;
		goto ioerr;
	}
	mb = page_address(page);

	if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
	    mb->version != R5LOG_VERSION) {
		create_super = true;
		goto create;
	}
	stored_crc = le32_to_cpu(mb->checksum);
	mb->checksum = 0;
	expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	if (stored_crc != expected_crc) {
		create_super = true;
		goto create;
	}
	if (le64_to_cpu(mb->position) != cp) {
		create_super = true;
		goto create;
	}
create:
	if (create_super) {
		log->last_cp_seq = prandom_u32();
		cp = 0;
		/*
		 * Make sure super points to the correct address. The log
		 * might have data very soon, and if the super doesn't carry
		 * the correct log tail address, recovery can't find the log.
		 */
		r5l_write_super(log, cp);
	} else
		log->last_cp_seq = le64_to_cpu(mb->seq);

	log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
	log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
	if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
		log->max_free_space = RECLAIM_MAX_FREE_SPACE;
	log->last_checkpoint = cp;

	__free_page(page);

	return r5l_recovery_log(log);
ioerr:
	__free_page(page);
	return ret;
}

int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
{
	struct r5l_log *log;

	if (PAGE_SIZE != 4096)
		return -EINVAL;
	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return -ENOMEM;
	log->rdev = rdev;

	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
				       sizeof(rdev->mddev->uuid));

	mutex_init(&log->io_mutex);

	spin_lock_init(&log->io_list_lock);
	INIT_LIST_HEAD(&log->running_ios);
	INIT_LIST_HEAD(&log->io_end_ios);
	INIT_LIST_HEAD(&log->stripe_end_ios);
	INIT_LIST_HEAD(&log->flushing_ios);
	INIT_LIST_HEAD(&log->flushed_ios);
	bio_init(&log->flush_bio);

	log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
	if (!log->io_kc)
		goto io_kc;

	log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
						 log->rdev->mddev, "reclaim");
	if (!log->reclaim_thread)
		goto reclaim_thread;
	init_waitqueue_head(&log->iounit_wait);

	INIT_LIST_HEAD(&log->no_space_stripes);
	spin_lock_init(&log->no_space_stripes_lock);

	if (r5l_load_log(log))
		goto error;

	conf->log = log;
	return 0;
error:
	md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
	kmem_cache_destroy(log->io_kc);
io_kc:
	kfree(log);
	return -EINVAL;
}

void r5l_exit_log(struct r5l_log *log)
{
	/*
	 * at this point all stripes are finished, so every io_unit is at
	 * least in STRIPE_END state
	 */
	r5l_wake_reclaim(log, -1L);
	md_unregister_thread(&log->reclaim_thread);
	r5l_do_reclaim(log);
	/*
	 * force a super update; r5l_do_reclaim might have updated the super.
	 * mddev->thread is already stopped
	 */
	md_update_sb(log->rdev->mddev, 1);

	kmem_cache_destroy(log->io_kc);
	kfree(log);
}