@@ -1826,6 +1826,11 @@ static inline bool bio_remaining_done(struct bio *bio)
  * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
  * way to end I/O on a bio. No one should call bi_end_io() directly on a
  * bio unless they own it and thus know that it has an end_io function.
+ *
+ * bio_endio() can be called several times on a bio that has been chained
+ * using bio_chain(). The ->bi_end_io() function will only be called the
+ * last time. At this point the BLK_TA_COMPLETE tracing event will be
+ * generated if BIO_TRACE_COMPLETION is set.
  **/
 void bio_endio(struct bio *bio)
 {
@@ -1846,6 +1851,12 @@ again:
 		goto again;
 	}
 
+	if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
+		trace_block_bio_complete(bdev_get_queue(bio->bi_bdev),
+					 bio, bio->bi_error);
+		bio_clear_flag(bio, BIO_TRACE_COMPLETION);
+	}
+
 	blk_throtl_bio_endio(bio);
 	if (bio->bi_end_io)
 		bio->bi_end_io(bio);
@@ -1885,6 +1896,9 @@ struct bio *bio_split(struct bio *bio, int sectors,
 
 	bio_advance(bio, split->bi_iter.bi_size);
 
+	if (bio_flagged(bio, BIO_TRACE_COMPLETION))
+		bio_set_flag(split, BIO_TRACE_COMPLETION);
+
 	return split;
 }
 EXPORT_SYMBOL(bio_split);