@@ -477,7 +477,7 @@ struct cache {
 	spinlock_t invalidation_lock;
 	struct list_head invalidation_requests;
 
-	struct io_tracker origin_tracker;
+	struct io_tracker tracker;
 
 	struct work_struct commit_ws;
 	struct batcher committer;
@@ -904,8 +904,7 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 
 static bool accountable_bio(struct cache *cache, struct bio *bio)
 {
-	return ((bio->bi_bdev == cache->origin_dev->bdev) &&
-		bio_op(bio) != REQ_OP_DISCARD);
+	return bio_op(bio) != REQ_OP_DISCARD;
 }
 
 static void accounted_begin(struct cache *cache, struct bio *bio)
@@ -915,7 +914,7 @@ static void accounted_begin(struct cache *cache, struct bio *bio)
 
 	if (accountable_bio(cache, bio)) {
 		pb->len = bio_sectors(bio);
-		iot_io_begin(&cache->origin_tracker, pb->len);
+		iot_io_begin(&cache->tracker, pb->len);
 	}
 }
 
@@ -924,7 +923,7 @@ static void accounted_complete(struct cache *cache, struct bio *bio)
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-	iot_io_end(&cache->origin_tracker, pb->len);
+	iot_io_end(&cache->tracker, pb->len);
 }
 
 static void accounted_request(struct cache *cache, struct bio *bio)
@@ -1725,7 +1724,7 @@ enum busy {
 
 static enum busy spare_migration_bandwidth(struct cache *cache)
 {
-	bool idle = iot_idle_for(&cache->origin_tracker, HZ);
+	bool idle = iot_idle_for(&cache->tracker, HZ);
 	sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
 		cache->sectors_per_block;
 
@@ -2720,7 +2719,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
 
 	batcher_init(&cache->committer, commit_op, cache,
 		     issue_op, cache, cache->wq);
-	iot_init(&cache->origin_tracker);
+	iot_init(&cache->tracker);
 
 	init_rwsem(&cache->background_work_lock);
 	prevent_background_work(cache);
@@ -2944,7 +2943,7 @@ static void cache_postsuspend(struct dm_target *ti)
 
 	cancel_delayed_work(&cache->waker);
 	flush_workqueue(cache->wq);
-	WARN_ON(cache->origin_tracker.in_flight);
+	WARN_ON(cache->tracker.in_flight);
 
 	/*
 	 * If it's a flush suspend there won't be any deferred bios, so this