@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
 	sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
 	struct bio *bio;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return ERR_PTR(-EIO);
+
 	/* Get a new block and a BIO to read it */
 	mblk = dmz_alloc_mblock(zmd, mblk_no);
 	if (!mblk)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio) {
 		dmz_free_mblock(zmd, mblk);
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	spin_lock(&zmd->mblk_lock);
@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
 	if (!mblk) {
 		/* Cache miss: read the block from disk */
 		mblk = dmz_get_mblock_slow(zmd, mblk_no);
-		if (!mblk)
-			return ERR_PTR(-ENOMEM);
+		if (IS_ERR(mblk))
+			return mblk;
 	}
 
 	/* Wait for on-going read I/O and check for error */
@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
 /*
  * Issue a metadata block write BIO.
  */
-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
-			     unsigned int set)
+static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+			    unsigned int set)
 {
 	sector_t block = zmd->sb[set].block + mblk->no;
 	struct bio *bio;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return -EIO;
+
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio) {
 		set_bit(DMZ_META_ERROR, &mblk->state);
-		return;
+		return -ENOMEM;
 	}
 
 	set_bit(DMZ_META_WRITING, &mblk->state);
@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
 	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
 	bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
 	submit_bio(bio);
+
+	return 0;
 }
 
 /*
@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
 	struct bio *bio;
 	int ret;
 
+	if (dmz_bdev_is_dying(zmd->dev))
+		return -EIO;
+
 	bio = bio_alloc(GFP_NOIO, 1);
 	if (!bio)
 		return -ENOMEM;
@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
 {
 	struct dmz_mblock *mblk;
 	struct blk_plug plug;
-	int ret = 0;
+	int ret = 0, nr_mblks_submitted = 0;
 
 	/* Issue writes */
 	blk_start_plug(&plug);
-	list_for_each_entry(mblk, write_list, link)
-		dmz_write_mblock(zmd, mblk, set);
+	list_for_each_entry(mblk, write_list, link) {
+		ret = dmz_write_mblock(zmd, mblk, set);
+		if (ret)
+			break;
+		nr_mblks_submitted++;
+	}
 	blk_finish_plug(&plug);
 
 	/* Wait for completion */
 	list_for_each_entry(mblk, write_list, link) {
+		if (!nr_mblks_submitted)
+			break;
 		wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
 			       TASK_UNINTERRUPTIBLE);
 		if (test_bit(DMZ_META_ERROR, &mblk->state)) {
 			clear_bit(DMZ_META_ERROR, &mblk->state);
 			ret = -EIO;
 		}
+		nr_mblks_submitted--;
 	}
 
 	/* Flush drive cache (this will also sync data) */
@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
 	 */
 	dmz_lock_flush(zmd);
 
+	if (dmz_bdev_is_dying(zmd->dev)) {
+		ret = -EIO;
+		goto out;
+	}
+
 	/* Get dirty blocks */
 	spin_lock(&zmd->mblk_lock);
 	list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1623,6 +1646,10 @@ again:
 	/* Alloate a random zone */
 	dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
 	if (!dzone) {
+		if (dmz_bdev_is_dying(zmd->dev)) {
+			dzone = ERR_PTR(-EIO);
+			goto out;
+		}
 		dmz_wait_for_free_zones(zmd);
 		goto again;
 	}
@@ -1720,6 +1747,10 @@ again:
 	/* Alloate a random zone */
 	bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
 	if (!bzone) {
+		if (dmz_bdev_is_dying(zmd->dev)) {
+			bzone = ERR_PTR(-EIO);
+			goto out;
+		}
 		dmz_wait_for_free_zones(zmd);
 		goto again;
 	}
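
The pointer-returning paths above switch to the standard <linux/err.h> convention: on failure they return an errno encoded with ERR_PTR() instead of NULL, and callers test the result with IS_ERR() and recover the code with PTR_ERR(), as dmz_get_mblock() now does. A minimal sketch of that pattern follows; the demo_* names are illustrative only and not part of dm-zoned.

#include <linux/err.h>
#include <linux/slab.h>

struct demo_block {			/* hypothetical payload type */
	int no;
};

/* Return a block on success, or an ERR_PTR()-encoded errno on failure. */
static struct demo_block *demo_get_block(int no, bool dev_dying)
{
	struct demo_block *blk;

	if (dev_dying)
		return ERR_PTR(-EIO);	/* device gone: report an I/O error */

	blk = kzalloc(sizeof(*blk), GFP_NOIO);
	if (!blk)
		return ERR_PTR(-ENOMEM);

	blk->no = no;
	return blk;
}

/* Caller: propagate the encoded errno instead of inventing one. */
static int demo_use_block(int no, bool dev_dying)
{
	struct demo_block *blk = demo_get_block(no, dev_dying);

	if (IS_ERR(blk))
		return PTR_ERR(blk);	/* -EIO or -ENOMEM from the helper */

	/* ... use blk ... */
	kfree(blk);
	return 0;
}

The benefit, as in the hunks above, is that a dying backing device surfaces as -EIO at the caller rather than being folded into a generic -ENOMEM.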