@@ -26,12 +26,12 @@ struct kmem_cache *bch_search_cache;

 static void bch_data_insert_start(struct closure *);

-static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
+static unsigned cache_mode(struct cached_dev *dc)
 {
 	return BDEV_CACHE_MODE(&dc->sb);
 }

-static bool verify(struct cached_dev *dc, struct bio *bio)
+static bool verify(struct cached_dev *dc)
 {
 	return dc->verify;
 }
@@ -369,7 +369,7 @@ static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 {
 	struct cache_set *c = dc->disk.c;
-	unsigned mode = cache_mode(dc, bio);
+	unsigned mode = cache_mode(dc);
 	unsigned sectors, congested = bch_get_congested(c);
 	struct task_struct *task = current;
 	struct io *i;
@@ -747,7 +747,7 @@ static void cached_dev_read_done(struct closure *cl)
 		s->cache_miss = NULL;
 	}

-	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
+	if (verify(dc) && s->recoverable && !s->read_dirty_data)
 		bch_data_verify(dc, s->orig_bio);

 	bio_complete(s);
@@ -772,7 +772,7 @@ static void cached_dev_read_done_bh(struct closure *cl)

 	if (s->iop.status)
 		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
-	else if (s->iop.bio || verify(dc, &s->bio.bio))
+	else if (s->iop.bio || verify(dc))
 		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
 	else
 		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
@@ -899,7 +899,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 		s->iop.bypass = true;

 		if (should_writeback(dc, s->orig_bio,
-				     cache_mode(dc, bio),
+				     cache_mode(dc),
 				     s->iop.bypass)) {
 			s->iop.bypass = false;
 			s->iop.writeback = true;