@@ -212,8 +212,8 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
 	unsigned i;
 
 	bio->bi_iter.bi_sector = SB_SECTOR;
-	bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META;
 	bio->bi_iter.bi_size = SB_SIZE;
+	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
 	bch_bio_map(bio, NULL);
 
 	out->offset = cpu_to_le64(sb->offset);
@@ -333,7 +333,7 @@ static void uuid_io_unlock(struct closure *cl)
 	up(&c->uuid_write_mutex);
 }
 
-static void uuid_io(struct cache_set *c, unsigned long rw,
+static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
 		    struct bkey *k, struct closure *parent)
 {
 	struct closure *cl = &c->uuid_write;
@@ -348,21 +348,22 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
 	for (i = 0; i < KEY_PTRS(k); i++) {
 		struct bio *bio = bch_bbio_alloc(c);
 
-		bio->bi_rw = REQ_SYNC|REQ_META|rw;
+		bio->bi_rw = REQ_SYNC|REQ_META|op_flags;
 		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
 		bio->bi_end_io = uuid_endio;
 		bio->bi_private = cl;
+		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
 		bch_bio_map(bio, c->uuids);
 
 		bch_submit_bbio(bio, c, k, i);
 
-		if (!(rw & WRITE))
+		if (op != REQ_OP_WRITE)
 			break;
 	}
 
 	bch_extent_to_text(buf, sizeof(buf), k);
-	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
+	pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
 
 	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
 		if (!bch_is_zero(u->uuid, 16))
@@ -381,7 +382,7 @@ static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
 		return "bad uuid pointer";
 
 	bkey_copy(&c->uuid_bucket, k);
-	uuid_io(c, READ_SYNC, k, cl);
+	uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);
 
 	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
 		struct uuid_entry_v0 *u0 = (void *) c->uuids;
@@ -426,7 +427,7 @@ static int __uuid_write(struct cache_set *c)
 		return 1;
 
 	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
-	uuid_io(c, REQ_WRITE, &k.key, &cl);
+	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
 	closure_sync(&cl);
 
 	bkey_copy(&c->uuid_bucket, &k.key);
@@ -498,7 +499,8 @@ static void prio_endio(struct bio *bio)
 	closure_put(&ca->prio);
 }
 
-static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
+static void prio_io(struct cache *ca, uint64_t bucket, int op,
+		    unsigned long op_flags)
 {
 	struct closure *cl = &ca->prio;
 	struct bio *bio = bch_bbio_alloc(ca->set);
@@ -507,11 +509,11 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
 
 	bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
 	bio->bi_bdev = ca->bdev;
-	bio->bi_rw = REQ_SYNC|REQ_META|rw;
 	bio->bi_iter.bi_size = bucket_bytes(ca);
 
 	bio->bi_end_io = prio_endio;
 	bio->bi_private = ca;
+	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
 	bch_bio_map(bio, ca->disk_buckets);
 
 	closure_bio_submit(bio, &ca->prio);
@@ -557,7 +559,7 @@ void bch_prio_write(struct cache *ca)
 		BUG_ON(bucket == -1);
 
 		mutex_unlock(&ca->set->bucket_lock);
-		prio_io(ca, bucket, REQ_WRITE);
+		prio_io(ca, bucket, REQ_OP_WRITE, 0);
 		mutex_lock(&ca->set->bucket_lock);
 
 		ca->prio_buckets[i] = bucket;
@@ -599,7 +601,7 @@ static void prio_read(struct cache *ca, uint64_t bucket)
 			ca->prio_last_buckets[bucket_nr] = bucket;
 			bucket_nr++;
 
-			prio_io(ca, bucket, READ_SYNC);
+			prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);
 
 			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
 				pr_warn("bad csum reading priorities");