- /*
- * Copyright (C) 2015 Shaohua Li <shli@fb.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- */
- #include <linux/kernel.h>
- #include <linux/wait.h>
- #include <linux/blkdev.h>
- #include <linux/slab.h>
- #include <linux/raid/md_p.h>
- #include <linux/crc32c.h>
- #include <linux/random.h>
- #include "md.h"
- #include "raid5.h"
- /*
- * metadata/data are stored on disk in 4k units (blocks) regardless of the
- * underlying hardware sector size. This only works with PAGE_SIZE == 4096.
- */
- #define BLOCK_SECTORS (8)
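- /*
- * BLOCK_SECTORS * 512 = 8 * 512 = 4096 bytes, i.e. one block is exactly one
- * page; the meta block and every logged data/parity page each take one block.
- */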
- /*
- * reclaim runs once reclaimable space reaches 1/4 of the disk size or 10G,
- * whichever is smaller. This keeps recovery from having to scan a very
- * long log.
- */
- #define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sector */
- #define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
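- /*
- * RECLAIM_MAX_FREE_SPACE is in 512-byte sectors:
- * 10 * 1024 * 1024 * 2 sectors * 512 bytes = 10GiB. The shift of 2 yields
- * the device_size / 4 alternative applied when the log is loaded.
- */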
- struct r5l_log {
- struct md_rdev *rdev;
- u32 uuid_checksum;
- sector_t device_size; /* log device size, rounded down to a
- * multiple of BLOCK_SECTORS */
- sector_t max_free_space; /* reclaim runs once reclaimable space
- * reaches this size */
- sector_t last_checkpoint; /* log tail. where the recovery scan
- * starts from */
- u64 last_cp_seq; /* log tail sequence */
- sector_t log_start; /* log head. where new data appends */
- u64 seq; /* log head sequence */
- struct mutex io_mutex;
- struct r5l_io_unit *current_io; /* current io_unit accepting new data */
- spinlock_t io_list_lock;
- struct list_head running_ios; /* io_units which are still running,
- * and have not yet been completely
- * written to the log */
- struct list_head io_end_ios; /* io_units which have been completely
- * written to the log but not yet written
- * to the RAID */
- struct list_head stripe_end_ios;/* io_units which have been completely
- * written to the RAID but have not yet
- * been considered for updating super */
- struct kmem_cache *io_kc;
- struct md_thread *reclaim_thread;
- unsigned long reclaim_target; /* amount of space that needs to be
- * reclaimed. if it's 0, reclaim spaces
- * used by io_units which are in
- * IO_UNIT_STRIPE_END state (i.e. reclaim
- * doesn't wait for a specific io_unit
- * to switch to IO_UNIT_STRIPE_END
- * state) */
- struct list_head no_space_stripes; /* pending stripes, log has no space */
- spinlock_t no_space_stripes_lock;
- };
- /*
- * an IO range starts at a meta data block and ends at the next meta data
- * block. The io_unit's meta data block tracks the data/parity that follows
- * it. io_units are written to the log disk with normal writes; since we
- * always flush the log disk before moving data to the raid disks, there is
- * no need to write an io_unit with FLUSH/FUA.
- */
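- /*
- * Resulting log layout for one io_unit, as written by r5l_log_stripe() and
- * walked back by recovery:
- *
- * | meta block | stripe A data pages | A parity page(s) | stripe B ... |
- *
- * Every block is BLOCK_SECTORS long; the meta block carries one payload
- * descriptor per data page and one per parity set.
- */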
- struct r5l_io_unit {
- struct r5l_log *log;
- struct page *meta_page; /* store meta block */
- int meta_offset; /* current offset in meta_page */
- struct bio_list bios;
- atomic_t pending_io; /* pending bios not written to log yet */
- struct bio *current_bio;/* current_bio accepting new data */
- atomic_t pending_stripe;/* how many stripes not flushed to raid */
- u64 seq; /* seq number of the metablock */
- sector_t log_start; /* where the io_unit starts */
- sector_t log_end; /* where the io_unit ends */
- struct list_head log_sibling; /* log->running_ios */
- struct list_head stripe_list; /* stripes added to the io_unit */
- int state;
- wait_queue_head_t wait_state;
- };
- /* r5l_io_unit state */
- enum r5l_io_unit_state {
- IO_UNIT_RUNNING = 0, /* accepting new IO */
- IO_UNIT_IO_START = 1, /* the io_unit's bios have started writing
- * to the log; no new bios are accepted */
- IO_UNIT_IO_END = 2, /* the io_unit's bios have finished writing
- * to the log */
- IO_UNIT_STRIPE_START = 3, /* stripes of the io_unit are being flushed
- * to the raid disks */
- IO_UNIT_STRIPE_END = 4, /* stripe data has finished writing to the
- * raid disks */
- };
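- /*
- * States only move forward: RUNNING -> IO_START (r5l_submit_current_io)
- * -> IO_END (r5l_log_endio) -> STRIPE_START (r5l_flush_stripe_to_raid)
- * -> STRIPE_END (r5l_stripe_write_finished); __r5l_set_io_unit_state()
- * warns if asked to move backwards.
- */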
- static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
- {
- start += inc;
- if (start >= log->device_size)
- start = start - log->device_size;
- return start;
- }
- static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
- sector_t end)
- {
- if (end >= start)
- return end - start;
- else
- return end + log->device_size - start;
- }
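- /*
- * Example of the ring helpers with device_size == 1000:
- * r5l_ring_add(log, 996, 8) wraps to 4, and
- * r5l_ring_distance(log, 996, 4) == 4 + 1000 - 996 == 8.
- */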
- static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
- {
- sector_t used_size;
- used_size = r5l_ring_distance(log, log->last_checkpoint,
- log->log_start);
- return log->device_size > used_size + size;
- }
- static struct r5l_io_unit *r5l_alloc_io_unit(struct r5l_log *log)
- {
- struct r5l_io_unit *io;
- /* We cannot handle memory allocation failure here yet */
- gfp_t gfp = GFP_NOIO | __GFP_NOFAIL;
- io = kmem_cache_zalloc(log->io_kc, gfp);
- io->log = log;
- io->meta_page = alloc_page(gfp | __GFP_ZERO);
- bio_list_init(&io->bios);
- INIT_LIST_HEAD(&io->log_sibling);
- INIT_LIST_HEAD(&io->stripe_list);
- io->state = IO_UNIT_RUNNING;
- init_waitqueue_head(&io->wait_state);
- return io;
- }
- static void r5l_free_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
- {
- __free_page(io->meta_page);
- kmem_cache_free(log->io_kc, io);
- }
- static void r5l_move_io_unit_list(struct list_head *from, struct list_head *to,
- enum r5l_io_unit_state state)
- {
- struct r5l_io_unit *io;
- while (!list_empty(from)) {
- io = list_first_entry(from, struct r5l_io_unit, log_sibling);
- /* don't change list order */
- if (io->state >= state)
- list_move_tail(&io->log_sibling, to);
- else
- break;
- }
- }
- /*
- * We don't want too many io_units to sit on the stripe_end_ios list, as
- * that wastes a lot of memory. So we try to remove some, but we must keep
- * at least 2 io_units: the superblock must point to a valid meta block,
- * and if it points to the last one, recovery has less to scan.
- */
- static void r5l_compress_stripe_end_list(struct r5l_log *log)
- {
- struct r5l_io_unit *first, *last, *io;
- first = list_first_entry(&log->stripe_end_ios,
- struct r5l_io_unit, log_sibling);
- last = list_last_entry(&log->stripe_end_ios,
- struct r5l_io_unit, log_sibling);
- if (first == last)
- return;
- list_del(&first->log_sibling);
- list_del(&last->log_sibling);
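- /*
- * free every io_unit between first and last, folding their log range into
- * first so the space they cover is still accounted for
- */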
- while (!list_empty(&log->stripe_end_ios)) {
- io = list_first_entry(&log->stripe_end_ios,
- struct r5l_io_unit, log_sibling);
- list_del(&io->log_sibling);
- first->log_end = io->log_end;
- r5l_free_io_unit(log, io);
- }
- list_add_tail(&first->log_sibling, &log->stripe_end_ios);
- list_add_tail(&last->log_sibling, &log->stripe_end_ios);
- }
- static void r5l_wake_reclaim(struct r5l_log *log, sector_t space);
- static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
- enum r5l_io_unit_state state)
- {
- struct r5l_log *log = io->log;
- if (WARN_ON(io->state >= state))
- return;
- io->state = state;
- if (state == IO_UNIT_IO_END)
- r5l_move_io_unit_list(&log->running_ios, &log->io_end_ios,
- IO_UNIT_IO_END);
- if (state == IO_UNIT_STRIPE_END) {
- struct r5l_io_unit *last;
- sector_t reclaimable_space;
- r5l_move_io_unit_list(&log->io_end_ios, &log->stripe_end_ios,
- IO_UNIT_STRIPE_END);
- last = list_last_entry(&log->stripe_end_ios,
- struct r5l_io_unit, log_sibling);
- reclaimable_space = r5l_ring_distance(log, log->last_checkpoint,
- last->log_end);
- if (reclaimable_space >= log->max_free_space)
- r5l_wake_reclaim(log, 0);
- r5l_compress_stripe_end_list(log);
- }
- wake_up(&io->wait_state);
- }
- static void r5l_set_io_unit_state(struct r5l_io_unit *io,
- enum r5l_io_unit_state state)
- {
- struct r5l_log *log = io->log;
- unsigned long flags;
- spin_lock_irqsave(&log->io_list_lock, flags);
- __r5l_set_io_unit_state(io, state);
- spin_unlock_irqrestore(&log->io_list_lock, flags);
- }
- /* XXX: totally ignores I/O errors */
- static void r5l_log_endio(struct bio *bio)
- {
- struct r5l_io_unit *io = bio->bi_private;
- struct r5l_log *log = io->log;
- bio_put(bio);
- if (!atomic_dec_and_test(&io->pending_io))
- return;
- r5l_set_io_unit_state(io, IO_UNIT_IO_END);
- md_wakeup_thread(log->rdev->mddev->thread);
- }
- static void r5l_submit_current_io(struct r5l_log *log)
- {
- struct r5l_io_unit *io = log->current_io;
- struct r5l_meta_block *block;
- struct bio *bio;
- u32 crc;
- if (!io)
- return;
- block = page_address(io->meta_page);
- block->meta_size = cpu_to_le32(io->meta_offset);
- crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
- block->checksum = cpu_to_le32(crc);
- log->current_io = NULL;
- r5l_set_io_unit_state(io, IO_UNIT_IO_START);
- while ((bio = bio_list_pop(&io->bios))) {
- /* all IO must start from rdev->data_offset */
- bio->bi_iter.bi_sector += log->rdev->data_offset;
- submit_bio(WRITE, bio);
- }
- }
- static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
- {
- struct r5l_io_unit *io;
- struct r5l_meta_block *block;
- struct bio *bio;
- io = r5l_alloc_io_unit(log);
- block = page_address(io->meta_page);
- block->magic = cpu_to_le32(R5LOG_MAGIC);
- block->version = R5LOG_VERSION;
- block->seq = cpu_to_le64(log->seq);
- block->position = cpu_to_le64(log->log_start);
- io->log_start = log->log_start;
- io->meta_offset = sizeof(struct r5l_meta_block);
- io->seq = log->seq;
- bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
- io->current_bio = bio;
- bio->bi_rw = WRITE;
- bio->bi_bdev = log->rdev->bdev;
- bio->bi_iter.bi_sector = log->log_start;
- bio_add_page(bio, io->meta_page, PAGE_SIZE, 0);
- bio->bi_end_io = r5l_log_endio;
- bio->bi_private = io;
- bio_list_add(&io->bios, bio);
- atomic_inc(&io->pending_io);
- log->seq++;
- log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);
- io->log_end = log->log_start;
- /* current bio hit disk end */
- if (log->log_start == 0)
- io->current_bio = NULL;
- spin_lock_irq(&log->io_list_lock);
- list_add_tail(&io->log_sibling, &log->running_ios);
- spin_unlock_irq(&log->io_list_lock);
- return io;
- }
- static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
- {
- struct r5l_io_unit *io;
- io = log->current_io;
- if (io && io->meta_offset + payload_size > PAGE_SIZE)
- r5l_submit_current_io(log);
- io = log->current_io;
- if (io)
- return 0;
- log->current_io = r5l_new_meta(log);
- return 0;
- }
- static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
- sector_t location,
- u32 checksum1, u32 checksum2,
- bool checksum2_valid)
- {
- struct r5l_io_unit *io = log->current_io;
- struct r5l_payload_data_parity *payload;
- payload = page_address(io->meta_page) + io->meta_offset;
- payload->header.type = cpu_to_le16(type);
- payload->header.flags = cpu_to_le16(0);
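- /*
- * size is in sectors: one page for data or RAID5 parity, two pages when
- * both P and Q parity are logged
- */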
- payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
- (PAGE_SHIFT - 9));
- payload->location = cpu_to_le64(location);
- payload->checksum[0] = cpu_to_le32(checksum1);
- if (checksum2_valid)
- payload->checksum[1] = cpu_to_le32(checksum2);
- io->meta_offset += sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) * (1 + !!checksum2_valid);
- }
- static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
- {
- struct r5l_io_unit *io = log->current_io;
- alloc_bio:
- if (!io->current_bio) {
- struct bio *bio;
- bio = bio_kmalloc(GFP_NOIO | __GFP_NOFAIL, BIO_MAX_PAGES);
- bio->bi_rw = WRITE;
- bio->bi_bdev = log->rdev->bdev;
- bio->bi_iter.bi_sector = log->log_start;
- bio->bi_end_io = r5l_log_endio;
- bio->bi_private = io;
- bio_list_add(&io->bios, bio);
- atomic_inc(&io->pending_io);
- io->current_bio = bio;
- }
- if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0)) {
- io->current_bio = NULL;
- goto alloc_bio;
- }
- log->log_start = r5l_ring_add(log, log->log_start,
- BLOCK_SECTORS);
- /* current bio hit disk end */
- if (log->log_start == 0)
- io->current_bio = NULL;
- io->log_end = log->log_start;
- }
- static void r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh,
- int data_pages, int parity_pages)
- {
- int i;
- int meta_size;
- struct r5l_io_unit *io;
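- /*
- * meta_size: one payload descriptor plus one checksum per data page, plus
- * a single payload descriptor with parity_pages checksums for the parity
- * set
- */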
- meta_size =
- ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
- * data_pages) +
- sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) * parity_pages;
- r5l_get_meta(log, meta_size);
- io = log->current_io;
- for (i = 0; i < sh->disks; i++) {
- if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
- continue;
- if (i == sh->pd_idx || i == sh->qd_idx)
- continue;
- r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA,
- raid5_compute_blocknr(sh, i, 0),
- sh->dev[i].log_checksum, 0, false);
- r5l_append_payload_page(log, sh->dev[i].page);
- }
- if (sh->qd_idx >= 0) {
- r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
- sh->sector, sh->dev[sh->pd_idx].log_checksum,
- sh->dev[sh->qd_idx].log_checksum, true);
- r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
- r5l_append_payload_page(log, sh->dev[sh->qd_idx].page);
- } else {
- r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY,
- sh->sector, sh->dev[sh->pd_idx].log_checksum,
- 0, false);
- r5l_append_payload_page(log, sh->dev[sh->pd_idx].page);
- }
- list_add_tail(&sh->log_list, &io->stripe_list);
- atomic_inc(&io->pending_stripe);
- sh->log_io = io;
- }
- /*
- * This runs in raid5d. Reclaim may itself wait for raid5d (when it flushes
- * data from the log to the raid disks), so we must not wait for reclaim
- * here to avoid a deadlock.
- */
- int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh)
- {
- int write_disks = 0;
- int data_pages, parity_pages;
- int meta_size;
- int reserve;
- int i;
- if (!log)
- return -EAGAIN;
- /* Don't support stripe batch */
- if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) ||
- test_bit(STRIPE_SYNCING, &sh->state)) {
- /* the stripe is already in the log (or skips the log); start writing it to raid */
- clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
- return -EAGAIN;
- }
- for (i = 0; i < sh->disks; i++) {
- void *addr;
- if (!test_bit(R5_Wantwrite, &sh->dev[i].flags))
- continue;
- write_disks++;
- /* checksums were already calculated in the last run */
- if (test_bit(STRIPE_LOG_TRAPPED, &sh->state))
- continue;
- addr = kmap_atomic(sh->dev[i].page);
- sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum,
- addr, PAGE_SIZE);
- kunmap_atomic(addr);
- }
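- /* P is always logged; Q is logged as well when this is RAID6 (qd_idx >= 0) */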
- parity_pages = 1 + !!(sh->qd_idx >= 0);
- data_pages = write_disks - parity_pages;
- meta_size =
- ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32))
- * data_pages) +
- sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) * parity_pages;
- /* this doesn't work with very big raid arrays */
- if (meta_size + sizeof(struct r5l_meta_block) > PAGE_SIZE)
- return -EINVAL;
- set_bit(STRIPE_LOG_TRAPPED, &sh->state);
- atomic_inc(&sh->count);
- mutex_lock(&log->io_mutex);
- /* meta + data */
- reserve = (1 + write_disks) << (PAGE_SHIFT - 9);
- if (r5l_has_free_space(log, reserve))
- r5l_log_stripe(log, sh, data_pages, parity_pages);
- else {
- spin_lock(&log->no_space_stripes_lock);
- list_add_tail(&sh->log_list, &log->no_space_stripes);
- spin_unlock(&log->no_space_stripes_lock);
- r5l_wake_reclaim(log, reserve);
- }
- mutex_unlock(&log->io_mutex);
- return 0;
- }
- void r5l_write_stripe_run(struct r5l_log *log)
- {
- if (!log)
- return;
- mutex_lock(&log->io_mutex);
- r5l_submit_current_io(log);
- mutex_unlock(&log->io_mutex);
- }
- /* This will run after log space is reclaimed */
- static void r5l_run_no_space_stripes(struct r5l_log *log)
- {
- struct stripe_head *sh;
- spin_lock(&log->no_space_stripes_lock);
- while (!list_empty(&log->no_space_stripes)) {
- sh = list_first_entry(&log->no_space_stripes,
- struct stripe_head, log_list);
- list_del_init(&sh->log_list);
- set_bit(STRIPE_HANDLE, &sh->state);
- raid5_release_stripe(sh);
- }
- spin_unlock(&log->no_space_stripes_lock);
- }
- void r5l_stripe_write_finished(struct stripe_head *sh)
- {
- struct r5l_io_unit *io;
- /* Don't support stripe batch */
- io = sh->log_io;
- if (!io)
- return;
- sh->log_io = NULL;
- if (atomic_dec_and_test(&io->pending_stripe))
- r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END);
- }
- /*
- * Start dispatching IO to the raid disks.
- * The log consists of a sequence of io_units, each headed by a meta block.
- * One situation must be avoided: a broken meta block in the middle of the
- * log would stop recovery from finding the meta blocks at the head of the
- * log. So if an operation requires the meta block at the head to be
- * persistent in the log, every meta block before it must be persistent in
- * the log too. A case in point:
- *
- * stripe data/parity is in the log and we start writing the stripe to the
- * raid disks. The stripe data/parity must be persistent in the log before
- * we do the write to the raid disks.
- *
- * The solution is to strictly maintain io_unit list order. We only write
- * the stripes of an io_unit to the raid disks once it and every io_unit
- * before it have their data/parity persistent in the log.
- */
- void r5l_flush_stripe_to_raid(struct r5l_log *log)
- {
- struct r5l_io_unit *io;
- struct stripe_head *sh;
- bool run_stripe;
- if (!log)
- return;
- spin_lock_irq(&log->io_list_lock);
- run_stripe = !list_empty(&log->io_end_ios);
- spin_unlock_irq(&log->io_list_lock);
- if (!run_stripe)
- return;
- blkdev_issue_flush(log->rdev->bdev, GFP_NOIO, NULL);
- spin_lock_irq(&log->io_list_lock);
- list_for_each_entry(io, &log->io_end_ios, log_sibling) {
- if (io->state >= IO_UNIT_STRIPE_START)
- continue;
- __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_START);
- while (!list_empty(&io->stripe_list)) {
- sh = list_first_entry(&io->stripe_list,
- struct stripe_head, log_list);
- list_del_init(&sh->log_list);
- set_bit(STRIPE_HANDLE, &sh->state);
- raid5_release_stripe(sh);
- }
- }
- spin_unlock_irq(&log->io_list_lock);
- }
- static void r5l_kick_io_unit(struct r5l_log *log, struct r5l_io_unit *io)
- {
- /* the log thread will write the io unit */
- wait_event(io->wait_state, io->state >= IO_UNIT_IO_END);
- if (io->state < IO_UNIT_STRIPE_START)
- r5l_flush_stripe_to_raid(log);
- wait_event(io->wait_state, io->state >= IO_UNIT_STRIPE_END);
- }
- static void r5l_write_super(struct r5l_log *log, sector_t cp);
- static void r5l_do_reclaim(struct r5l_log *log)
- {
- struct r5l_io_unit *io, *last;
- LIST_HEAD(list);
- sector_t free = 0;
- sector_t reclaim_target = xchg(&log->reclaim_target, 0);
- spin_lock_irq(&log->io_list_lock);
- /*
- * move the proper io_units to the reclaim list. We should not change the
- * order: reclaimable and unreclaimable io_units can be mixed in the list,
- * and we must not reuse the space of an unreclaimable io_unit.
- */
- while (1) {
- while (!list_empty(&log->stripe_end_ios)) {
- io = list_first_entry(&log->stripe_end_ios,
- struct r5l_io_unit, log_sibling);
- list_move_tail(&io->log_sibling, &list);
- free += r5l_ring_distance(log, io->log_start,
- io->log_end);
- }
- if (free >= reclaim_target ||
- (list_empty(&log->running_ios) &&
- list_empty(&log->io_end_ios) &&
- list_empty(&log->stripe_end_ios)))
- break;
- /* The waiting below mostly happens when we shut down the raid */
- if (!list_empty(&log->io_end_ios)) {
- io = list_first_entry(&log->io_end_ios,
- struct r5l_io_unit, log_sibling);
- spin_unlock_irq(&log->io_list_lock);
- /* nobody else can delete the io, we are safe */
- r5l_kick_io_unit(log, io);
- spin_lock_irq(&log->io_list_lock);
- continue;
- }
- if (!list_empty(&log->running_ios)) {
- io = list_first_entry(&log->running_ios,
- struct r5l_io_unit, log_sibling);
- spin_unlock_irq(&log->io_list_lock);
- /* nobody else can delete the io, we are safe */
- r5l_kick_io_unit(log, io);
- spin_lock_irq(&log->io_list_lock);
- continue;
- }
- }
- spin_unlock_irq(&log->io_list_lock);
- if (list_empty(&list))
- return;
- /* the superblock always points to the last valid meta block */
- last = list_last_entry(&list, struct r5l_io_unit, log_sibling);
- /*
- * write_super will flush the cache of each raid disk. We must write the
- * super here, because the log area might be reused soon and we don't want
- * to confuse recovery.
- */
- r5l_write_super(log, last->log_start);
- mutex_lock(&log->io_mutex);
- log->last_checkpoint = last->log_start;
- log->last_cp_seq = last->seq;
- mutex_unlock(&log->io_mutex);
- r5l_run_no_space_stripes(log);
- while (!list_empty(&list)) {
- io = list_first_entry(&list, struct r5l_io_unit, log_sibling);
- list_del(&io->log_sibling);
- r5l_free_io_unit(log, io);
- }
- }
- static void r5l_reclaim_thread(struct md_thread *thread)
- {
- struct mddev *mddev = thread->mddev;
- struct r5conf *conf = mddev->private;
- struct r5l_log *log = conf->log;
- if (!log)
- return;
- r5l_do_reclaim(log);
- }
- static void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
- {
- unsigned long target;
- unsigned long new = (unsigned long)space; /* overflow in theory */
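- /* only ever raise the target; r5l_do_reclaim() xchg()s it back to 0 */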
- do {
- target = log->reclaim_target;
- if (new < target)
- return;
- } while (cmpxchg(&log->reclaim_target, target, new) != target);
- md_wakeup_thread(log->reclaim_thread);
- }
- struct r5l_recovery_ctx {
- struct page *meta_page; /* current meta */
- sector_t meta_total_blocks; /* total size of current meta and data */
- sector_t pos; /* recovery position */
- u64 seq; /* recovery position seq */
- };
- static int r5l_read_meta_block(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
- {
- struct page *page = ctx->meta_page;
- struct r5l_meta_block *mb;
- u32 crc, stored_crc;
- if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, READ, false))
- return -EIO;
- mb = page_address(page);
- stored_crc = le32_to_cpu(mb->checksum);
- mb->checksum = 0;
- if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
- le64_to_cpu(mb->seq) != ctx->seq ||
- mb->version != R5LOG_VERSION ||
- le64_to_cpu(mb->position) != ctx->pos)
- return -EINVAL;
- crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
- if (stored_crc != crc)
- return -EINVAL;
- if (le32_to_cpu(mb->meta_size) > PAGE_SIZE)
- return -EINVAL;
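- /*
- * start with the meta block itself; the data/parity blocks are added as
- * the payloads are parsed in r5l_recovery_flush_one_stripe()
- */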
- ctx->meta_total_blocks = BLOCK_SECTORS;
- return 0;
- }
- static int r5l_recovery_flush_one_stripe(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx,
- sector_t stripe_sect,
- int *offset, sector_t *log_offset)
- {
- struct r5conf *conf = log->rdev->mddev->private;
- struct stripe_head *sh;
- struct r5l_payload_data_parity *payload;
- int disk_index;
- sh = raid5_get_active_stripe(conf, stripe_sect, 0, 0, 0);
- while (1) {
- payload = page_address(ctx->meta_page) + *offset;
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) {
- raid5_compute_sector(conf,
- le64_to_cpu(payload->location), 0,
- &disk_index, sh);
- sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, READ, false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[0]);
- set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
- ctx->meta_total_blocks += BLOCK_SECTORS;
- } else {
- disk_index = sh->pd_idx;
- sync_page_io(log->rdev, *log_offset, PAGE_SIZE,
- sh->dev[disk_index].page, READ, false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[0]);
- set_bit(R5_Wantwrite, &sh->dev[disk_index].flags);
- if (sh->qd_idx >= 0) {
- disk_index = sh->qd_idx;
- sync_page_io(log->rdev,
- r5l_ring_add(log, *log_offset, BLOCK_SECTORS),
- PAGE_SIZE, sh->dev[disk_index].page,
- READ, false);
- sh->dev[disk_index].log_checksum =
- le32_to_cpu(payload->checksum[1]);
- set_bit(R5_Wantwrite,
- &sh->dev[disk_index].flags);
- }
- ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
- }
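- /*
- * payload->size is in sectors: advance log_offset by it, and convert it
- * back to a page count (>> (PAGE_SHIFT - 9)) to know how many checksum
- * entries to skip in the meta block
- */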
- *log_offset = r5l_ring_add(log, *log_offset,
- le32_to_cpu(payload->size));
- *offset += sizeof(struct r5l_payload_data_parity) +
- sizeof(__le32) *
- (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
- if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY)
- break;
- }
- for (disk_index = 0; disk_index < sh->disks; disk_index++) {
- void *addr;
- u32 checksum;
- if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags))
- continue;
- addr = kmap_atomic(sh->dev[disk_index].page);
- checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
- kunmap_atomic(addr);
- if (checksum != sh->dev[disk_index].log_checksum)
- goto error;
- }
- for (disk_index = 0; disk_index < sh->disks; disk_index++) {
- struct md_rdev *rdev, *rrdev;
- if (!test_and_clear_bit(R5_Wantwrite,
- &sh->dev[disk_index].flags))
- continue;
- /* in case device is broken */
- rdev = rcu_dereference(conf->disks[disk_index].rdev);
- if (rdev)
- sync_page_io(rdev, stripe_sect, PAGE_SIZE,
- sh->dev[disk_index].page, WRITE, false);
- rrdev = rcu_dereference(conf->disks[disk_index].replacement);
- if (rrdev)
- sync_page_io(rrdev, stripe_sect, PAGE_SIZE,
- sh->dev[disk_index].page, WRITE, false);
- }
- raid5_release_stripe(sh);
- return 0;
- error:
- for (disk_index = 0; disk_index < sh->disks; disk_index++)
- sh->dev[disk_index].flags = 0;
- raid5_release_stripe(sh);
- return -EINVAL;
- }
- static int r5l_recovery_flush_one_meta(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
- {
- struct r5conf *conf = log->rdev->mddev->private;
- struct r5l_payload_data_parity *payload;
- struct r5l_meta_block *mb;
- int offset;
- sector_t log_offset;
- sector_t stripe_sector;
- mb = page_address(ctx->meta_page);
- offset = sizeof(struct r5l_meta_block);
- log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
- while (offset < le32_to_cpu(mb->meta_size)) {
- int dd;
- payload = (void *)mb + offset;
- stripe_sector = raid5_compute_sector(conf,
- le64_to_cpu(payload->location), 0, &dd, NULL);
- if (r5l_recovery_flush_one_stripe(log, ctx, stripe_sector,
- &offset, &log_offset))
- return -EINVAL;
- }
- return 0;
- }
- /* copy data/parity from log to raid disks */
- static void r5l_recovery_flush_log(struct r5l_log *log,
- struct r5l_recovery_ctx *ctx)
- {
- while (1) {
- if (r5l_read_meta_block(log, ctx))
- return;
- if (r5l_recovery_flush_one_meta(log, ctx))
- return;
- ctx->seq++;
- ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks);
- }
- }
- static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos,
- u64 seq)
- {
- struct page *page;
- struct r5l_meta_block *mb;
- u32 crc;
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
- return -ENOMEM;
- mb = page_address(page);
- mb->magic = cpu_to_le32(R5LOG_MAGIC);
- mb->version = R5LOG_VERSION;
- mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block));
- mb->seq = cpu_to_le64(seq);
- mb->position = cpu_to_le64(pos);
- crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
- mb->checksum = cpu_to_le32(crc);
- if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, WRITE_FUA, false)) {
- __free_page(page);
- return -EIO;
- }
- __free_page(page);
- return 0;
- }
- static int r5l_recovery_log(struct r5l_log *log)
- {
- struct r5l_recovery_ctx ctx;
- ctx.pos = log->last_checkpoint;
- ctx.seq = log->last_cp_seq;
- ctx.meta_page = alloc_page(GFP_KERNEL);
- if (!ctx.meta_page)
- return -ENOMEM;
- r5l_recovery_flush_log(log, &ctx);
- __free_page(ctx.meta_page);
- /*
- * we did a recovery. Now ctx.pos points to an invalid meta block, and the
- * new log will start there. But we can't let the superblock point to the
- * last valid meta block. The log might look like:
- * | meta 1 | meta 2 | meta 3 |
- * meta 1 is valid, meta 2 is invalid and meta 3 could still look valid. If
- * the superblock pointed to meta 1 and we wrote a new valid meta 2', then
- * after another crash recovery would start from meta 1, find meta 2' valid,
- * and wrongly conclude that meta 3 is valid too.
- * The solution is to write a new empty meta block at meta 2's position with
- * seq == meta 1's seq + 10 and let the superblock point to it. Recovery
- * then won't treat meta 3 as valid, because its seq doesn't match.
- */
- if (ctx.seq > log->last_cp_seq + 1) {
- int ret;
- ret = r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq + 10);
- if (ret)
- return ret;
- log->seq = ctx.seq + 11;
- log->log_start = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- r5l_write_super(log, ctx.pos);
- } else {
- log->log_start = ctx.pos;
- log->seq = ctx.seq;
- }
- return 0;
- }
- static void r5l_write_super(struct r5l_log *log, sector_t cp)
- {
- struct mddev *mddev = log->rdev->mddev;
- log->rdev->journal_tail = cp;
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- }
- static int r5l_load_log(struct r5l_log *log)
- {
- struct md_rdev *rdev = log->rdev;
- struct page *page;
- struct r5l_meta_block *mb;
- sector_t cp = log->rdev->journal_tail;
- u32 stored_crc, expected_crc;
- bool create_super = false;
- int ret;
- /* Make sure it's valid */
- if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp)
- cp = 0;
- page = alloc_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
- if (!sync_page_io(rdev, cp, PAGE_SIZE, page, READ, false)) {
- ret = -EIO;
- goto ioerr;
- }
- mb = page_address(page);
- if (le32_to_cpu(mb->magic) != R5LOG_MAGIC ||
- mb->version != R5LOG_VERSION) {
- create_super = true;
- goto create;
- }
- stored_crc = le32_to_cpu(mb->checksum);
- mb->checksum = 0;
- expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
- if (stored_crc != expected_crc) {
- create_super = true;
- goto create;
- }
- if (le64_to_cpu(mb->position) != cp) {
- create_super = true;
- goto create;
- }
- create:
- if (create_super) {
- log->last_cp_seq = prandom_u32();
- cp = 0;
- /*
- * Make sure the super points to the correct address. The log might
- * receive data very soon; if the super doesn't hold the correct log
- * tail address, recovery can't find the log.
- */
- r5l_write_super(log, cp);
- } else
- log->last_cp_seq = le64_to_cpu(mb->seq);
- log->device_size = round_down(rdev->sectors, BLOCK_SECTORS);
- log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT;
- if (log->max_free_space > RECLAIM_MAX_FREE_SPACE)
- log->max_free_space = RECLAIM_MAX_FREE_SPACE;
- log->last_checkpoint = cp;
- __free_page(page);
- return r5l_recovery_log(log);
- ioerr:
- __free_page(page);
- return ret;
- }
- int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
- {
- struct r5l_log *log;
- if (PAGE_SIZE != 4096)
- return -EINVAL;
- log = kzalloc(sizeof(*log), GFP_KERNEL);
- if (!log)
- return -ENOMEM;
- log->rdev = rdev;
- log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
- sizeof(rdev->mddev->uuid));
- mutex_init(&log->io_mutex);
- spin_lock_init(&log->io_list_lock);
- INIT_LIST_HEAD(&log->running_ios);
- INIT_LIST_HEAD(&log->io_end_ios);
- INIT_LIST_HEAD(&log->stripe_end_ios);
- log->io_kc = KMEM_CACHE(r5l_io_unit, 0);
- if (!log->io_kc)
- goto io_kc;
- log->reclaim_thread = md_register_thread(r5l_reclaim_thread,
- log->rdev->mddev, "reclaim");
- if (!log->reclaim_thread)
- goto reclaim_thread;
- INIT_LIST_HEAD(&log->no_space_stripes);
- spin_lock_init(&log->no_space_stripes_lock);
- if (r5l_load_log(log))
- goto error;
- conf->log = log;
- return 0;
- error:
- md_unregister_thread(&log->reclaim_thread);
- reclaim_thread:
- kmem_cache_destroy(log->io_kc);
- io_kc:
- kfree(log);
- return -EINVAL;
- }
- void r5l_exit_log(struct r5l_log *log)
- {
- /*
- * at this point all stripes are finished, so every io_unit is at least in
- * IO_UNIT_STRIPE_END state
- */
- r5l_wake_reclaim(log, -1L);
- md_unregister_thread(&log->reclaim_thread);
- r5l_do_reclaim(log);
- /*
- * force a super update. r5l_do_reclaim might have updated the super;
- * mddev->thread is already stopped, so nothing else will write it out.
- */
- md_update_sb(log->rdev->mddev, 1);
- kmem_cache_destroy(log->io_kc);
- kfree(log);
- }