@@ -936,7 +936,6 @@ static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
 static void cached_dev_detach_finish(struct work_struct *w)
 {
 	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
-	char buf[BDEVNAME_SIZE];
 	struct closure cl;
 	closure_init_stack(&cl);
 
@@ -967,7 +966,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
 
 	mutex_unlock(&bch_register_lock);
 
-	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
+	pr_info("Caching disabled for %s", dc->backing_dev_name);
 
 	/* Drop ref we took in cached_dev_detach() */
 	closure_put(&dc->disk.cl);
@@ -999,29 +998,28 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 {
 	uint32_t rtime = cpu_to_le32(get_seconds());
 	struct uuid_entry *u;
-	char buf[BDEVNAME_SIZE];
 	struct cached_dev *exist_dc, *t;
 
-	bdevname(dc->bdev, buf);
-
 	if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
 	    (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
 		return -ENOENT;
 
 	if (dc->disk.c) {
-		pr_err("Can't attach %s: already attached", buf);
+		pr_err("Can't attach %s: already attached",
+		       dc->backing_dev_name);
 		return -EINVAL;
 	}
 
 	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
-		pr_err("Can't attach %s: shutting down", buf);
+		pr_err("Can't attach %s: shutting down",
+		       dc->backing_dev_name);
 		return -EINVAL;
 	}
 
 	if (dc->sb.block_size < c->sb.block_size) {
 		/* Will die */
 		pr_err("Couldn't attach %s: block size less than set's block size",
-		       buf);
+		       dc->backing_dev_name);
 		return -EINVAL;
 	}
 
@@ -1029,7 +1027,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
 		if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
 			pr_err("Tried to attach %s but duplicate UUID already attached",
-				buf);
+				dc->backing_dev_name);
 
 			return -EINVAL;
 		}
@@ -1047,13 +1045,15 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 
 	if (!u) {
 		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
-			pr_err("Couldn't find uuid for %s in set", buf);
+			pr_err("Couldn't find uuid for %s in set",
+			       dc->backing_dev_name);
 			return -ENOENT;
 		}
 
 		u = uuid_find_empty(c);
 		if (!u) {
-			pr_err("Not caching %s, no room for UUID", buf);
+			pr_err("Not caching %s, no room for UUID",
+			       dc->backing_dev_name);
 			return -EINVAL;
 		}
 	}
@@ -1112,7 +1112,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
 	up_write(&dc->writeback_lock);
 
 	pr_info("Caching %s as %s on set %pU",
-		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
+		dc->backing_dev_name,
+		dc->disk.disk->disk_name,
 		dc->disk.c->sb.set_uuid);
 	return 0;
 }
@@ -1225,10 +1226,10 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 				 struct block_device *bdev,
 				 struct cached_dev *dc)
 {
-	char name[BDEVNAME_SIZE];
 	const char *err = "cannot allocate memory";
 	struct cache_set *c;
 
+	bdevname(bdev, dc->backing_dev_name);
 	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
 	dc->bdev = bdev;
 	dc->bdev->bd_holder = dc;
@@ -1237,6 +1238,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
 	get_page(sb_page);
 
+
 	if (cached_dev_init(dc, sb->block_size << 9))
 		goto err;
 
@@ -1247,7 +1249,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
 		goto err;
 
-	pr_info("registered backing device %s", bdevname(bdev, name));
+	pr_info("registered backing device %s", dc->backing_dev_name);
 
 	list_add(&dc->list, &uncached_devices);
 	list_for_each_entry(c, &bch_cache_sets, list)
@@ -1259,7 +1261,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
 
 	return;
 err:
-	pr_notice("error %s: %s", bdevname(bdev, name), err);
+	pr_notice("error %s: %s", dc->backing_dev_name, err);
 	bcache_device_stop(&dc->disk);
 }
 
@@ -1367,7 +1369,7 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
 
 bool bch_cached_dev_error(struct cached_dev *dc)
 {
-	char name[BDEVNAME_SIZE];
+	struct cache_set *c;
 
 	if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
 		return false;
@@ -1377,7 +1379,22 @@ bool bch_cached_dev_error(struct cached_dev *dc)
 	smp_mb();
 
 	pr_err("stop %s: too many IO errors on backing device %s\n",
-	       dc->disk.disk->disk_name, bdevname(dc->bdev, name));
+	       dc->disk.disk->disk_name, dc->backing_dev_name);
+
+	/*
+	 * If the cached device is still attached to a cache set, then
+	 * even with dc->io_disable set and no more I/O requests being
+	 * accepted, internal I/O against the cache device (writeback
+	 * scan or garbage collection) may still prevent the bcache
+	 * device from being stopped. So CACHE_SET_IO_DISABLE should be
+	 * set in c->flags too, so that internal I/O to the cache
+	 * device is rejected and stopped immediately.
+	 * If c is NULL, the bcache device is not attached to any cache
+	 * set, and there is no CACHE_SET_IO_DISABLE bit to set.
+	 */
+	c = dc->disk.c;
+	if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
+		pr_info("CACHE_SET_IO_DISABLE already set");
 
 	bcache_device_stop(&dc->disk);
 	return true;
@@ -1395,7 +1412,7 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
 		return false;
 
 	if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
-		pr_warn("CACHE_SET_IO_DISABLE already set");
+		pr_info("CACHE_SET_IO_DISABLE already set");
 
 	/* XXX: we can be called from atomic context
 	   acquire_console_sem();
@@ -1539,6 +1556,20 @@ static void conditional_stop_bcache_device(struct cache_set *c,
 		 */
 		pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
 			d->disk->disk_name);
+		/*
+		 * There might be a small time gap in which the cache set
+		 * is released but the bcache device is not. During this
+		 * gap, regular I/O requests go directly to the backing
+		 * device, since no cache set is attached. In writeback
+		 * mode with a dirty cache this may also introduce
+		 * potentially inconsistent data.
+		 * Therefore, before calling bcache_device_stop() due to
+		 * a broken cache device, dc->io_disable should be
+		 * explicitly set to true.
+		 */
+		dc->io_disable = true;
+		/* make others know io_disable is true earlier */
+		smp_mb();
 		bcache_device_stop(d);
 	} else {
 		/*
@@ -2003,12 +2034,10 @@ static int cache_alloc(struct cache *ca)
 static int register_cache(struct cache_sb *sb, struct page *sb_page,
 				struct block_device *bdev, struct cache *ca)
 {
-	char name[BDEVNAME_SIZE];
 	const char *err = NULL; /* must be set for any error case */
 	int ret = 0;
 
-	bdevname(bdev, name);
-
+	bdevname(bdev, ca->cache_dev_name);
 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
 	ca->bdev = bdev;
 	ca->bdev->bd_holder = ca;
@@ -2045,14 +2074,14 @@ static int register_cache(struct cache_sb *sb, struct page *sb_page,
 		goto out;
 	}
 
-	pr_info("registered cache device %s", name);
+	pr_info("registered cache device %s", ca->cache_dev_name);
 
 out:
 	kobject_put(&ca->kobj);
 
 err:
 	if (err)
-		pr_notice("error %s: %s", name, err);
+		pr_notice("error %s: %s", ca->cache_dev_name, err);
 
 	return ret;
 }
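
Note: the hunks above read dc->backing_dev_name and ca->cache_dev_name but do not declare them, so a companion change to drivers/md/bcache/bcache.h is assumed to add those two fields. The stand-alone C sketch below only illustrates that assumption and the pattern the patch relies on (copy the device name once at registration time, then reuse the cached copy for later messages); the cut-down struct definitions and the copy_dev_name() helper are hypothetical stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <string.h>

#define BDEVNAME_SIZE 32	/* same symbolic buffer size the kernel uses */

/* Hypothetical, cut-down stand-ins showing only the assumed new fields. */
struct cached_dev {
	char backing_dev_name[BDEVNAME_SIZE];	/* assumed new field in bcache.h */
};

struct cache {
	char cache_dev_name[BDEVNAME_SIZE];	/* assumed new field in bcache.h */
};

/* Userspace stand-in for bdevname(): copy the name into the caller's buffer. */
static void copy_dev_name(char *dst, const char *src)
{
	strncpy(dst, src, BDEVNAME_SIZE - 1);
	dst[BDEVNAME_SIZE - 1] = '\0';
}

int main(void)
{
	struct cached_dev dc;
	struct cache ca;

	/* Fill the names once at "registration" time ... */
	copy_dev_name(dc.backing_dev_name, "sdb");
	copy_dev_name(ca.cache_dev_name, "nvme0n1");

	/* ... then later messages print the cached copy instead of calling
	 * the name helper with a temporary on-stack buffer each time. */
	printf("registered backing device %s\n", dc.backing_dev_name);
	printf("registered cache device %s\n", ca.cache_dev_name);
	return 0;
}

In the patch itself the one-time copies happen in register_bdev() and register_cache() via bdevname(), which is what lets cached_dev_detach_finish(), bch_cached_dev_attach() and bch_cached_dev_error() print a stable device name without a local BDEVNAME_SIZE buffer.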