writeback.c

/*
 * background writeback - scan btree for dirty data and write it to the backing
 * device
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "writeback.h"

#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>

/* Rate limiting */

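/*
 * The writeback rate is driven by a proportional-differential (PD)
 * controller: the proportional term tracks how far the amount of dirty
 * data is from the target (writeback_percent of the cache), and the
 * differential term tracks how fast the amount of dirty data is changing.
 * The resulting rate, in sectors per second, is consumed by
 * bch_next_delay() via writeback_delay().
 */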
static void __update_writeback_rate(struct cached_dev *dc)
{
	struct cache_set *c = dc->disk.c;
	uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
	uint64_t cache_dirty_target =
		div_u64(cache_sectors * dc->writeback_percent, 100);

	int64_t target = div64_u64(cache_dirty_target * bdev_sectors(dc->bdev),
				   c->cached_dev_sectors);

	/* PD controller */

	int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
	int64_t derivative = dirty - dc->disk.sectors_dirty_last;
	int64_t proportional = dirty - target;
	int64_t change;

	dc->disk.sectors_dirty_last = dirty;

	/* Scale to sectors per second */

	proportional *= dc->writeback_rate_update_seconds;
	proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);

	derivative = div_s64(derivative, dc->writeback_rate_update_seconds);

	derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
			      (dc->writeback_rate_d_term /
			       dc->writeback_rate_update_seconds) ?: 1, 0);

	derivative *= dc->writeback_rate_d_term;
	derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);

	change = proportional + derivative;

	/* Don't increase writeback rate if the device isn't keeping up */
	if (change > 0 &&
	    time_after64(local_clock(),
			 dc->writeback_rate.next + NSEC_PER_MSEC))
		change = 0;

	dc->writeback_rate.rate =
		clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
			1, NSEC_PER_MSEC);

	dc->writeback_rate_proportional = proportional;
	dc->writeback_rate_derivative = derivative;
	dc->writeback_rate_change = change;
	dc->writeback_rate_target = target;
}

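/*
 * Delayed work that periodically recomputes the rate while there is dirty
 * data; it reschedules itself every writeback_rate_update_seconds.
 */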
static void update_writeback_rate(struct work_struct *work)
{
	struct cached_dev *dc = container_of(to_delayed_work(work),
					     struct cached_dev,
					     writeback_rate_update);

	down_read(&dc->writeback_lock);

	if (atomic_read(&dc->has_dirty) &&
	    dc->writeback_percent)
		__update_writeback_rate(dc);

	up_read(&dc->writeback_lock);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);
}

static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    !dc->writeback_percent)
		return 0;

	return bch_next_delay(&dc->writeback_rate, sectors);
}

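/*
 * One dirty_io is allocated per dirty extent being written back.  Its
 * closure chains read_dirty_submit() (read from the cache device) into
 * write_dirty() (write to the backing device) and finally
 * write_dirty_finish() (clear the dirty bit in the btree).
 */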
struct dirty_io {
	struct closure		cl;
	struct cached_dev	*dc;
	struct bio		bio;
};

static void dirty_init(struct keybuf_key *w)
{
	struct dirty_io *io = w->private;
	struct bio *bio = &io->bio;

	bio_init(bio);
	if (!io->dc->writeback_percent)
		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
	bio->bi_private		= w;
	bio->bi_io_vec		= bio->bi_inline_vecs;
	bch_bio_map(bio, NULL);
}

static void dirty_io_destructor(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	kfree(io);
}

static void write_dirty_finish(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;
	struct cached_dev *dc = io->dc;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, &io->bio, i)
		__free_page(bv->bv_page);

	/* This is kind of a dumb way of signalling errors. */
	if (KEY_DIRTY(&w->key)) {
		int ret;
		unsigned i;
		struct keylist keys;

		bch_keylist_init(&keys);

		bkey_copy(keys.top, &w->key);
		SET_KEY_DIRTY(keys.top, false);
		bch_keylist_push(&keys);

		for (i = 0; i < KEY_PTRS(&w->key); i++)
			atomic_inc(&PTR_BUCKET(dc->disk.c, &w->key, i)->pin);

		ret = bch_btree_insert(dc->disk.c, &keys, NULL, &w->key);

		if (ret)
			trace_bcache_writeback_collision(&w->key);

		atomic_long_inc(ret
				? &dc->disk.c->writeback_keys_failed
				: &dc->disk.c->writeback_keys_done);
	}

	bch_keybuf_del(&dc->writeback_keys, w);
	up(&dc->in_flight);

	closure_return_with_destructor(cl, dirty_io_destructor);
}

static void dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	if (bio->bi_error)
		SET_KEY_DIRTY(&w->key, false);

	closure_put(&io->cl);
}

static void write_dirty(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);
	struct keybuf_key *w = io->bio.bi_private;

	dirty_init(w);
	io->bio.bi_rw		= WRITE;
	io->bio.bi_iter.bi_sector = KEY_START(&w->key);
	io->bio.bi_bdev		= io->dc->bdev;
	io->bio.bi_end_io	= dirty_endio;

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty_finish, system_wq);
}

static void read_dirty_endio(struct bio *bio)
{
	struct keybuf_key *w = bio->bi_private;
	struct dirty_io *io = w->private;

	bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
			    bio->bi_error, "reading dirty data from cache");

	dirty_endio(bio);
}

static void read_dirty_submit(struct closure *cl)
{
	struct dirty_io *io = container_of(cl, struct dirty_io, cl);

	closure_bio_submit(&io->bio, cl);

	continue_at(cl, write_dirty, system_wq);
}

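/*
 * Pull dirty keys out of the keybuf and read the corresponding data from
 * the cache device; each completed read continues into write_dirty().
 * Throughput is throttled by writeback_delay(), and the number of
 * in-flight writeback IOs is capped by the dc->in_flight semaphore.
 */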
static void read_dirty(struct cached_dev *dc)
{
	unsigned delay = 0;
	struct keybuf_key *w;
	struct dirty_io *io;
	struct closure cl;

	closure_init_stack(&cl);

	/*
	 * XXX: if we error, background writeback just spins. Should use some
	 * mempools.
	 */

	while (!kthread_should_stop()) {
		try_to_freeze();

		w = bch_keybuf_next(&dc->writeback_keys);
		if (!w)
			break;

		BUG_ON(ptr_stale(dc->disk.c, &w->key, 0));

		if (KEY_START(&w->key) != dc->last_read ||
		    jiffies_to_msecs(delay) > 50)
			while (!kthread_should_stop() && delay)
				delay = schedule_timeout_interruptible(delay);

		dc->last_read	= KEY_OFFSET(&w->key);

		io = kzalloc(sizeof(struct dirty_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->dc		= dc;

		dirty_init(w);
		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
						    &w->key, 0)->bdev;
		io->bio.bi_rw		= READ;
		io->bio.bi_end_io	= read_dirty_endio;

		if (bio_alloc_pages(&io->bio, GFP_KERNEL))
			goto err_free;

		trace_bcache_writeback(&w->key);

		down(&dc->in_flight);
		closure_call(&io->cl, read_dirty_submit, NULL, &cl);

		delay = writeback_delay(dc, KEY_SIZE(&w->key));
	}

	if (0) {
err_free:
		kfree(w->private);
err:
		bch_keybuf_del(&dc->writeback_keys, w);
	}

	/*
	 * Wait for outstanding writeback IOs to finish (and keybuf slots to be
	 * freed) before refilling again
	 */

	closure_sync(&cl);
}

/* Scan for dirty data */

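/*
 * Dirty sector counts are kept per stripe of the backing device; stripes
 * that are entirely dirty are also tracked in the full_dirty_stripes
 * bitmap so refill_full_stripes() can target them when partial stripe
 * writes are expensive.
 */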
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
				  uint64_t offset, int nr_sectors)
{
	struct bcache_device *d = c->devices[inode];
	unsigned stripe_offset, stripe, sectors_dirty;

	if (!d)
		return;

	stripe = offset_to_stripe(d, offset);
	stripe_offset = offset & (d->stripe_size - 1);

	while (nr_sectors) {
		int s = min_t(unsigned, abs(nr_sectors),
			      d->stripe_size - stripe_offset);

		if (nr_sectors < 0)
			s = -s;

		if (stripe >= d->nr_stripes)
			return;

		sectors_dirty = atomic_add_return(s,
					d->stripe_sectors_dirty + stripe);
		if (sectors_dirty == d->stripe_size)
			set_bit(stripe, d->full_dirty_stripes);
		else
			clear_bit(stripe, d->full_dirty_stripes);

		nr_sectors -= s;
		stripe_offset = 0;
		stripe++;
	}
}

static bool dirty_pred(struct keybuf *buf, struct bkey *k)
{
	return KEY_DIRTY(k);
}

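/*
 * Refill the keybuf only from stripes that are completely dirty, scanning
 * the full_dirty_stripes bitmap circularly starting from last_scanned.
 */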
static void refill_full_stripes(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	unsigned start_stripe, stripe, next_stripe;
	bool wrapped = false;

	stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));

	if (stripe >= dc->disk.nr_stripes)
		stripe = 0;

	start_stripe = stripe;

	while (1) {
		stripe = find_next_bit(dc->disk.full_dirty_stripes,
				       dc->disk.nr_stripes, stripe);

		if (stripe == dc->disk.nr_stripes)
			goto next;

		next_stripe = find_next_zero_bit(dc->disk.full_dirty_stripes,
						 dc->disk.nr_stripes, stripe);

		buf->last_scanned = KEY(dc->disk.id,
					stripe * dc->disk.stripe_size, 0);

		bch_refill_keybuf(dc->disk.c, buf,
				  &KEY(dc->disk.id,
				       next_stripe * dc->disk.stripe_size, 0),
				  dirty_pred);

		if (array_freelist_empty(&buf->freelist))
			return;

		stripe = next_stripe;
next:
		if (wrapped && stripe > start_stripe)
			return;

		if (stripe == dc->disk.nr_stripes) {
			stripe = 0;
			wrapped = true;
		}
	}
}

static bool refill_dirty(struct cached_dev *dc)
{
	struct keybuf *buf = &dc->writeback_keys;
	struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
	bool searched_from_start = false;

	if (dc->partial_stripes_expensive) {
		refill_full_stripes(dc);
		if (array_freelist_empty(&buf->freelist))
			return false;
	}

	if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
		buf->last_scanned = KEY(dc->disk.id, 0, 0);
		searched_from_start = true;
	}

	bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);

	return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
}

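/*
 * Main writeback thread: sleeps while there is nothing to do, otherwise
 * refills the keybuf from the btree and writes the dirty data back.  Once
 * a full index scan turns up no more dirty keys, the backing device is
 * marked clean in its superblock.
 */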
static int bch_writeback_thread(void *arg)
{
	struct cached_dev *dc = arg;
	bool searched_full_index;

	while (!kthread_should_stop()) {
		down_write(&dc->writeback_lock);
		if (!atomic_read(&dc->has_dirty) ||
		    (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
		     !dc->writeback_running)) {
			up_write(&dc->writeback_lock);
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop())
				return 0;

			try_to_freeze();
			schedule();
			continue;
		}

		searched_full_index = refill_dirty(dc);

		if (searched_full_index &&
		    RB_EMPTY_ROOT(&dc->writeback_keys.keys)) {
			atomic_set(&dc->has_dirty, 0);
			cached_dev_put(dc);
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
			bch_write_bdev_super(dc, NULL);
		}

		up_write(&dc->writeback_lock);

		bch_ratelimit_reset(&dc->writeback_rate);
		read_dirty(dc);

		if (searched_full_index) {
			unsigned delay = dc->writeback_delay * HZ;

			while (delay &&
			       !kthread_should_stop() &&
			       !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
				delay = schedule_timeout_interruptible(delay);
		}
	}

	return 0;
}

/* Init */

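/*
 * At startup, walk the btree once and rebuild the per-stripe dirty sector
 * counts for this backing device.
 */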
struct sectors_dirty_init {
	struct btree_op	op;
	unsigned	inode;
};

static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
				 struct bkey *k)
{
	struct sectors_dirty_init *op = container_of(_op,
						struct sectors_dirty_init, op);
	if (KEY_INODE(k) > op->inode)
		return MAP_DONE;

	if (KEY_DIRTY(k))
		bcache_dev_sectors_dirty_add(b->c, KEY_INODE(k),
					     KEY_START(k), KEY_SIZE(k));

	return MAP_CONTINUE;
}

void bch_sectors_dirty_init(struct cached_dev *dc)
{
	struct sectors_dirty_init op;

	bch_btree_op_init(&op.op, -1);
	op.inode = dc->disk.id;

	bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
			   sectors_dirty_init_fn, 0);

	dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}

void bch_cached_dev_writeback_init(struct cached_dev *dc)
{
	sema_init(&dc->in_flight, 64);
	init_rwsem(&dc->writeback_lock);
	bch_keybuf_init(&dc->writeback_keys);

	dc->writeback_metadata		= true;
	dc->writeback_running		= true;
	dc->writeback_percent		= 10;
	dc->writeback_delay		= 30;
	dc->writeback_rate.rate		= 1024;

	dc->writeback_rate_update_seconds = 5;
	dc->writeback_rate_d_term	= 30;
	dc->writeback_rate_p_term_inverse = 6000;

	INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
}

int bch_cached_dev_writeback_start(struct cached_dev *dc)
{
	dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread))
		return PTR_ERR(dc->writeback_thread);

	schedule_delayed_work(&dc->writeback_rate_update,
			      dc->writeback_rate_update_seconds * HZ);

	bch_writeback_queue(dc);

	return 0;
}