@@ -733,8 +733,6 @@ static void bcache_device_detach(struct bcache_device *d)
 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
 {
-	BUG_ON(test_bit(CACHE_SET_STOPPING, &c->flags));
-
	d->id = id;
	d->c = c;
	c->devices[id] = d;
@@ -927,6 +925,7 @@ static void cached_dev_detach_finish(struct work_struct *w)
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
+	clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

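The extra clear_bit() above appears to re-arm the device's "already unlinked" marker once a detach completes, so a later attach/detach cycle can unlink the sysfs entries again. A minimal sketch of the test_and_set/clear pairing such a flag typically supports; the struct and names here are illustrative, not bcache's:

#include <linux/bitops.h>

#define DEMO_DEV_UNLINK_DONE	0	/* hypothetical bit index */

struct demo_device {
	unsigned long flags;
};

static void demo_device_unlink(struct demo_device *d)
{
	/* Tear down the sysfs links at most once per attach cycle. */
	if (test_and_set_bit(DEMO_DEV_UNLINK_DONE, &d->flags))
		return;
	/* ... sysfs_remove_link() calls would go here ... */
}

static void demo_detach_finish(struct demo_device *d)
{
	/* Re-arm the unlink for the next time the device is attached. */
	clear_bit(DEMO_DEV_UNLINK_DONE, &d->flags);
}
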
@@ -1041,6 +1040,9 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
	 */
	atomic_set(&dc->count, 1);

+	if (bch_cached_dev_writeback_start(dc))
+		return -ENOMEM;
+
	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		bch_sectors_dirty_init(dc);
		atomic_set(&dc->has_dirty, 1);
@@ -1070,7 +1072,8 @@ static void cached_dev_free(struct closure *cl)
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);
-	kthread_stop(dc->writeback_thread);
+	if (!IS_ERR_OR_NULL(dc->writeback_thread))
+		kthread_stop(dc->writeback_thread);

	mutex_lock(&bch_register_lock);

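kthread_stop() oopses on a NULL or ERR_PTR task pointer, and with writeback startup now deferred to bch_cached_dev_attach() (the previous hunk), dc->writeback_thread may never have been created by the time cached_dev_free() runs. A self-contained sketch of the guarded-stop pattern, with hypothetical names:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct demo_cached_dev {
	struct task_struct *writeback_thread;	/* NULL until started */
};

static int demo_writeback_fn(void *arg)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();	/* stand-in for real writeback work */
	}
	return 0;
}

static int demo_writeback_start(struct demo_cached_dev *dc)
{
	dc->writeback_thread = kthread_create(demo_writeback_fn, dc, "demo_wb");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);
	wake_up_process(dc->writeback_thread);
	return 0;
}

static void demo_free(struct demo_cached_dev *dc)
{
	/* The thread may never have been created, or creation failed. */
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		kthread_stop(dc->writeback_thread);
}
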
@@ -1081,12 +1084,8 @@ static void cached_dev_free(struct closure *cl)
	mutex_unlock(&bch_register_lock);

-	if (!IS_ERR_OR_NULL(dc->bdev)) {
-		if (dc->bdev->bd_disk)
-			blk_sync_queue(bdev_get_queue(dc->bdev));
-
+	if (!IS_ERR_OR_NULL(dc->bdev))
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-	}

	wake_up(&unregister_wait);

@@ -1213,7 +1212,9 @@ void bch_flash_dev_release(struct kobject *kobj)
 static void flash_dev_free(struct closure *cl)
 {
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
+	mutex_lock(&bch_register_lock);
	bcache_device_free(d);
+	mutex_unlock(&bch_register_lock);
	kobject_put(&d->kobj);
 }

@@ -1221,7 +1222,9 @@ static void flash_dev_flush(struct closure *cl)
 {
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

+	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
+	mutex_unlock(&bch_register_lock);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
 }
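Both flash-device teardown paths now hold bch_register_lock while calling into bcache_device_free()/bcache_device_unlink(), presumably because those helpers touch state that the registration paths reach under the same lock. A sketch of pairing shared-state teardown with the registration mutex; the lock, list, and names are stand-ins:

#include <linux/list.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_register_lock);	/* stand-in for bch_register_lock */
static LIST_HEAD(demo_devices);

struct demo_device {
	struct list_head list;
};

static void demo_device_register(struct demo_device *d)
{
	mutex_lock(&demo_register_lock);
	list_add(&d->list, &demo_devices);
	mutex_unlock(&demo_register_lock);
}

static void demo_device_free(struct demo_device *d)
{
	/*
	 * Teardown takes the same lock as registration, so a concurrent
	 * register/unregister can never walk state being freed here.
	 */
	mutex_lock(&demo_register_lock);
	list_del(&d->list);
	mutex_unlock(&demo_register_lock);
}
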
@@ -1277,6 +1280,9 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size)
	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

+	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
+		return -EPERM;
+
	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
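This pairs with the set_bit(CACHE_SET_RUNNING, &c->flags) added at the end of run_cache_set() further down: volume creation is refused until the cache set has finished starting up. A sketch of the flag-gated create path; the flag names and struct are stand-ins for the real cache_set flags word:

#include <linux/bitops.h>
#include <linux/errno.h>

#define DEMO_SET_STOPPING	0	/* illustrative bit indices */
#define DEMO_SET_RUNNING	1

struct demo_set {
	unsigned long flags;
};

static int demo_create_volume(struct demo_set *c)
{
	if (test_bit(DEMO_SET_STOPPING, &c->flags))
		return -EINTR;
	/* Refuse until startup has completed and set the RUNNING bit. */
	if (!test_bit(DEMO_SET_RUNNING, &c->flags))
		return -EPERM;
	/* ... allocate a UUID, size the device, etc. ... */
	return 0;
}
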
@@ -1346,8 +1352,11 @@ static void cache_set_free(struct closure *cl)
	bch_journal_free(c);

	for_each_cache(ca, c, i)
-		if (ca)
+		if (ca) {
+			ca->set = NULL;
+			c->cache[ca->sb.nr_this_dev] = NULL;
			kobject_put(&ca->kobj);
+		}

	bch_bset_sort_state_free(&c->sort);
	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
@@ -1405,9 +1414,11 @@ static void cache_set_flush(struct closure *cl)
		if (ca->alloc_thread)
			kthread_stop(ca->alloc_thread);

-	cancel_delayed_work_sync(&c->journal.work);
-	/* flush last journal entry if needed */
-	c->journal.work.work.func(&c->journal.work.work);
+	if (c->journal.cur) {
+		cancel_delayed_work_sync(&c->journal.work);
+		/* flush last journal entry if needed */
+		c->journal.work.work.func(&c->journal.work.work);
+	}

	closure_return(cl);
 }
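If registration fails early, the journal may never have been initialized; in that case c->journal.work was never armed and invoking its callback by hand would touch unset state. Checking c->journal.cur makes the final flush conditional on the journal actually having been brought up. A sketch of the pattern, with illustrative names:

#include <linux/workqueue.h>

struct demo_journal {
	void *cur;			/* non-NULL once initialized */
	struct delayed_work work;
};

static void demo_journal_flush(struct work_struct *work)
{
	/* stand-in for the real journal write-out */
}

static void demo_journal_init(struct demo_journal *j)
{
	INIT_DELAYED_WORK(&j->work, demo_journal_flush);
	j->cur = j;	/* mark the journal as brought up */
}

static void demo_set_flush(struct demo_journal *j)
{
	/*
	 * Only cancel and run the flush work if the journal was ever
	 * initialized; an early registration failure leaves cur NULL.
	 */
	if (j->cur) {
		cancel_delayed_work_sync(&j->work);
		j->work.work.func(&j->work.work);
	}
}
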
@@ -1586,7 +1597,7 @@ static void run_cache_set(struct cache_set *c)
			goto err;

		err = "error reading btree root";
-		c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true);
+		c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

@@ -1661,7 +1672,7 @@ static void run_cache_set(struct cache_set *c)
			goto err;

		err = "cannot allocate new btree root";
-		c->root = bch_btree_node_alloc(c, NULL, 0);
+		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

@@ -1697,6 +1708,7 @@ static void run_cache_set(struct cache_set *c)
	flash_devs_run(c);

+	set_bit(CACHE_SET_RUNNING, &c->flags);
	return;
 err:
	closure_sync(&cl);
@@ -1760,6 +1772,7 @@ found:
		pr_debug("set version = %llu", c->sb.version);
	}

+	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;
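Here the cache set takes its own reference on the cache's kobject when it adopts the cache. Together with cache_set_free() dropping one reference per cache (earlier hunk) and register_cache() now always dropping its initial reference (later hunk), each holder owns exactly one reference. A stripped-down sketch of that ownership rule, with made-up types:

#include <linux/kobject.h>

struct demo_cache {
	struct kobject kobj;	/* assumed already kobject_init'd */
};

struct demo_set {
	struct demo_cache *cache;
};

/* The set takes its own reference when it adopts a cache... */
static void demo_set_add_cache(struct demo_set *c, struct demo_cache *ca)
{
	kobject_get(&ca->kobj);
	c->cache = ca;
}

/* ...and drops exactly that reference when it lets go. */
static void demo_set_free(struct demo_set *c)
{
	if (c->cache) {
		/* Sever the linkage before the put, so the cache's
		 * release callback never sees a half-dead set. */
		c->cache = NULL;
		kobject_put(&c->cache->kobj);
	}
}
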
@@ -1780,8 +1793,10 @@ void bch_cache_release(struct kobject *kobj)
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

-	if (ca->set)
+	if (ca->set) {
+		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
+	}

	bio_split_pool_free(&ca->bio_split_hook);

@@ -1798,10 +1813,8 @@ void bch_cache_release(struct kobject *kobj)
	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

-	if (!IS_ERR_OR_NULL(ca->bdev)) {
-		blk_sync_queue(bdev_get_queue(ca->bdev));
+	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
-	}

	kfree(ca);
	module_put(THIS_MODULE);
@@ -1844,7 +1857,7 @@ static int cache_alloc(struct cache_sb *sb, struct cache *ca)
 }

 static void register_cache(struct cache_sb *sb, struct page *sb_page,
-			  struct block_device *bdev, struct cache *ca)
+				struct block_device *bdev, struct cache *ca)
 {
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
@@ -1877,10 +1890,12 @@ static void register_cache(struct cache_sb *sb, struct page *sb_page,
		goto err;

	pr_info("registered cache device %s", bdevname(bdev, name));
+out:
+	kobject_put(&ca->kobj);
	return;
 err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
-	kobject_put(&ca->kobj);
+	goto out;
 }

 /* Global interfaces/init */
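register_cache() now funnels both the success and the failure path through a single out: label that drops the function's initial kobject reference, which is safe precisely because the cache set holds its own reference (see the kobject_get() hunk above). A sketch of the single-exit drop; the helper and struct names are made up:

#include <linux/kobject.h>
#include <linux/printk.h>

struct demo_cache {
	struct kobject kobj;	/* assumed already kobject_init'd */
};

static int demo_cache_alloc(struct demo_cache *ca)
{
	return 0;	/* pretend setup succeeded */
}

static void demo_register_cache(struct demo_cache *ca)
{
	const char *err = "cannot allocate memory";

	if (demo_cache_alloc(ca))
		goto err;

	pr_info("registered cache device\n");
out:
	/* Drop the registration path's reference on every exit. */
	kobject_put(&ca->kobj);
	return;
err:
	pr_notice("error opening device: %s\n", err);
	goto out;
}
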
@@ -1945,10 +1960,12 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
+			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
+			mutex_unlock(&bch_register_lock);
		}
		goto err;
	}
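bch_is_open() presumably scans the lists of already-registered backing and cache devices, so the lookup in the -EBUSY fallback now runs under bch_register_lock to avoid racing with a concurrent registration. A sketch of a lock-protected open-device check; every name here is illustrative:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

static DEFINE_MUTEX(demo_register_lock);
static LIST_HEAD(demo_devices);

struct demo_device {
	struct list_head list;
	dev_t dev;
};

/* Caller must hold demo_register_lock: this walks a shared list. */
static bool demo_is_open(dev_t dev)
{
	struct demo_device *d;

	list_for_each_entry(d, &demo_devices, list)
		if (d->dev == dev)
			return true;
	return false;
}

static const char *demo_busy_reason(dev_t dev)
{
	const char *err;

	mutex_lock(&demo_register_lock);
	err = demo_is_open(dev) ? "device already registered"
				: "device busy";
	mutex_unlock(&demo_register_lock);
	return err;
}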