@@ -719,7 +719,7 @@ static void cached_dev_read_error(struct closure *cl)
 
 		/* XXX: invalidate cache */
 
-		closure_bio_submit(bio, cl, s->d);
+		closure_bio_submit(bio, cl);
 	}
 
 	continue_at(cl, cached_dev_cache_miss_done, NULL);
@@ -842,7 +842,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	s->cache_miss	= miss;
 	s->iop.bio	= cache_bio;
 	bio_get(cache_bio);
-	closure_bio_submit(cache_bio, &s->cl, s->d);
+	closure_bio_submit(cache_bio, &s->cl);
 
 	return ret;
 out_put:
@@ -850,7 +850,7 @@ out_put:
 out_submit:
 	miss->bi_end_io	= request_endio;
 	miss->bi_private = &s->cl;
-	closure_bio_submit(miss, &s->cl, s->d);
+	closure_bio_submit(miss, &s->cl);
 	return ret;
 }
 
@@ -915,7 +915,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 
 		if (!(bio->bi_rw & REQ_DISCARD) ||
 		    blk_queue_discard(bdev_get_queue(dc->bdev)))
-			closure_bio_submit(bio, cl, s->d);
+			closure_bio_submit(bio, cl);
 	} else if (s->iop.writeback) {
 		bch_writeback_add(dc);
 		s->iop.bio = bio;
@@ -930,12 +930,12 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 			flush->bi_end_io = request_endio;
 			flush->bi_private = cl;
 
-			closure_bio_submit(flush, cl, s->d);
+			closure_bio_submit(flush, cl);
 		}
 	} else {
 		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
-		closure_bio_submit(bio, cl, s->d);
+		closure_bio_submit(bio, cl);
 	}
 
 	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
@@ -951,7 +951,7 @@ static void cached_dev_nodata(struct closure *cl)
 		bch_journal_meta(s->iop.c, cl);
 
 	/* If it's a flush, we send the flush to the backing device too */
-	closure_bio_submit(bio, cl, s->d);
+	closure_bio_submit(bio, cl);
 
 	continue_at(cl, cached_dev_bio_complete, NULL);
 }
@@ -995,7 +995,7 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
 			bio_endio(bio);
 		else
-			bch_generic_make_request(bio, &d->bio_split_hook);
+			generic_make_request(bio);
 	}
 }
 