io.c

/*
 * Some low level IO code, and hacks for various block layer limitations
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "bset.h"
#include "debug.h"

#include <linux/blkdev.h>

/* Bios with headers */

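/*
 * A "bbio" is a bio bundled with a header: the bkey it was generated from and
 * a submission timestamp (see the container_of() calls below). The key
 * supplies the target device and sector, and submit_time_us lets completion
 * feed the congestion accounting further down. These are allocated from the
 * cache set's bio_meta mempool so metadata IO can always make forward
 * progress.
 */
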
void bch_bbio_free(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        mempool_free(b, c->bio_meta);
}

struct bio *bch_bbio_alloc(struct cache_set *c)
{
        struct bbio *b = mempool_alloc(c->bio_meta, GFP_NOIO);
        struct bio *bio = &b->bio;

        bio_init(bio, bio->bi_inline_vecs, bucket_pages(c));

        return bio;
}

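/*
 * Submission uses the first pointer in the bbio's key: PTR_OFFSET() gives the
 * sector on the cache device, and PTR_CACHE() resolves which struct cache
 * (and therefore which block device) that pointer refers to.
 */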
void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
        bio->bi_bdev           = PTR_CACHE(c, &b->key, 0)->bdev;

        b->submit_time_us = local_clock_us();
        closure_bio_submit(bio, bio->bi_private);
}

void bch_submit_bbio(struct bio *bio, struct cache_set *c,
                     struct bkey *k, unsigned ptr)
{
        struct bbio *b = container_of(bio, struct bbio, bio);

        bch_bkey_copy_single_ptr(&b->key, k, ptr);
        __bch_submit_bbio(bio, c);
}

/* IO errors */

void bch_count_io_errors(struct cache *ca, int error, const char *m)
{
        /*
         * The halflife of an error is:
         * log2(1/2)/log2(127/128) * refresh ~= 88 * refresh
         */
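        /*
         * (Worked out: each time io_count exceeds error_decay, io_errors is
         * scaled by 127/128, so after n such decays (127/128)^n = 1/2, i.e.
         * n = ln(2) / ln(128/127) ~= 88.4 refresh periods of error_decay
         * IOs each.)
         */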
        if (ca->set->error_decay) {
                unsigned count = atomic_inc_return(&ca->io_count);

                while (count > ca->set->error_decay) {
                        unsigned errors;
                        unsigned old = count;
                        unsigned new = count - ca->set->error_decay;

                        /*
                         * First we subtract refresh from count; each time we
                         * successfully do so, we rescale the errors once:
                         */
                        count = atomic_cmpxchg(&ca->io_count, old, new);

                        if (count == old) {
                                count = new;

                                errors = atomic_read(&ca->io_errors);
                                do {
                                        old = errors;
                                        new = ((uint64_t) errors * 127) / 128;
                                        errors = atomic_cmpxchg(&ca->io_errors,
                                                                old, new);
                                } while (old != errors);
                        }
                }
        }

        if (error) {
                char buf[BDEVNAME_SIZE];
                unsigned errors = atomic_add_return(1 << IO_ERROR_SHIFT,
                                                    &ca->io_errors);
                errors >>= IO_ERROR_SHIFT;

                if (errors < ca->set->error_limit)
                        pr_err("%s: IO error on %s, recovering",
                               bdevname(ca->bdev, buf), m);
                else
                        bch_cache_set_error(ca->set,
                                            "%s: too many IO errors %s",
                                            bdevname(ca->bdev, buf), m);
        }
}

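/*
 * Completion-side accounting: besides counting errors, compare how long the
 * IO took against the read/write congestion thresholds. An IO that exceeds
 * its threshold subtracts a (capped) number of "milliseconds" (us / 1024, an
 * approximation) from c->congested; IOs under the threshold slowly increment
 * it back towards zero. The sign and magnitude of c->congested can then be
 * consulted elsewhere in bcache as a cheap congestion signal.
 */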
void bch_bbio_count_io_errors(struct cache_set *c, struct bio *bio,
                              int error, const char *m)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct cache *ca = PTR_CACHE(c, &b->key, 0);

        unsigned threshold = op_is_write(bio_op(bio))
                ? c->congested_write_threshold_us
                : c->congested_read_threshold_us;

        if (threshold) {
                unsigned t = local_clock_us();
                int us = t - b->submit_time_us;
                int congested = atomic_read(&c->congested);

                if (us > (int) threshold) {
                        int ms = us / 1024;

                        c->congested_last_us = t;

                        ms = min(ms, CONGESTED_MAX + congested);
                        atomic_sub(ms, &c->congested);
                } else if (congested < 0)
                        atomic_inc(&c->congested);
        }

        bch_count_io_errors(ca, error, m);
}

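/*
 * Common endio path for bbios: account for errors and latency, then drop the
 * bio's reference and put the closure stashed in bi_private so the waiting
 * operation can proceed.
 */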
void bch_bbio_endio(struct cache_set *c, struct bio *bio,
                    int error, const char *m)
{
        struct closure *cl = bio->bi_private;

        bch_bbio_count_io_errors(c, bio, error, m);
        bio_put(bio);
        closure_put(cl);
}