journal.c

// SPDX-License-Identifier: GPL-2.0
/*
 * bcache journalling code, for btree insertions
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"

#include <trace/events/bcache.h>
/*
 * Journal replay/recovery:
 *
 * This code is all driven from run_cache_set(); we first read the journal
 * entries, do some other stuff, then we mark all the keys in the journal
 * entries (same as garbage collection would), then we replay them - reinserting
 * them into the cache in precisely the same order as they appear in the
 * journal.
 *
 * We only journal keys that go in leaf nodes, which simplifies things quite a
 * bit.
 */
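/*
 * While reading, entries are collected on a list of struct journal_replay,
 * kept in sequence-number order; bch_journal_mark() and bch_journal_replay()
 * then walk that list.
 */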
static void journal_read_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	closure_put(cl);
}
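/*
 * Read the journal entries in a single journal bucket, adding any valid
 * entries found to @list. Returns a negative error, 0 if nothing (further)
 * was found in this bucket, or 1 if at least one entry was added.
 */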
static int journal_read_bucket(struct cache *ca, struct list_head *list,
			       unsigned bucket_index)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->bio;

	struct journal_replay *i;
	struct jset *j, *data = ca->set->journal.w[0].data;
	struct closure cl;
	unsigned len, left, offset = 0;
	int ret = 0;
	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);

	closure_init_stack(&cl);

	pr_debug("reading %u", bucket_index);

	while (offset < ca->sb.bucket_size) {
reread:		left = ca->sb.bucket_size - offset;
		len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= bucket + offset;
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= len << 9;

		bio->bi_end_io	= journal_read_endio;
		bio->bi_private = &cl;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bch_bio_map(bio, data);

		closure_bio_submit(ca->set, bio, &cl);
		closure_sync(&cl);

		/* This function could be simpler now since we no longer write
		 * journal entries that overlap bucket boundaries; this means
		 * the start of a bucket will always have a valid journal entry
		 * if it has any journal entries at all.
		 */

		j = data;
		while (len) {
			struct list_head *where;
			size_t blocks, bytes = set_bytes(j);

			if (j->magic != jset_magic(&ca->sb)) {
				pr_debug("%u: bad magic", bucket_index);
				return ret;
			}

			if (bytes > left << 9 ||
			    bytes > PAGE_SIZE << JSET_BITS) {
				pr_info("%u: too big, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			if (bytes > len << 9)
				goto reread;

			if (j->csum != csum_set(j)) {
				pr_info("%u: bad csum, %zu bytes, offset %u",
					bucket_index, bytes, offset);
				return ret;
			}

			blocks = set_blocks(j, block_bytes(ca->set));

			while (!list_empty(list)) {
				i = list_first_entry(list,
					struct journal_replay, list);
				if (i->j.seq >= j->last_seq)
					break;
				list_del(&i->list);
				kfree(i);
			}

			list_for_each_entry_reverse(i, list, list) {
				if (j->seq == i->j.seq)
					goto next_set;

				if (j->seq < i->j.last_seq)
					goto next_set;

				if (j->seq > i->j.seq) {
					where = &i->list;
					goto add;
				}
			}

			where = list;
add:
			i = kmalloc(offsetof(struct journal_replay, j) +
				    bytes, GFP_KERNEL);
			if (!i)
				return -ENOMEM;
			memcpy(&i->j, j, bytes);
			list_add(&i->list, where);
			ret = 1;

			ja->seq[bucket_index] = j->seq;
next_set:
			offset	+= blocks * ca->sb.block_size;
			len	-= blocks * ca->sb.block_size;
			j = ((void *) j) + blocks * block_bytes(ca);
		}
	}

	return ret;
}
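/*
 * Find and read every journal entry on each cache device. The read_bucket()
 * macro below wraps journal_read_bucket(): it marks the bucket as checked in
 * @bitmap, returns from bch_journal_read() on error, and otherwise evaluates
 * to whether the bucket contained any journal entries.
 */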
int bch_journal_read(struct cache_set *c, struct list_head *list)
{
#define read_bucket(b)							\
	({								\
		int ret = journal_read_bucket(ca, list, b);		\
		__set_bit(b, bitmap);					\
		if (ret < 0)						\
			return ret;					\
		ret;							\
	})

	struct cache *ca;
	unsigned iter;

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
		unsigned i, l, r, m;
		uint64_t seq;

		bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
		pr_debug("%u journal buckets", ca->sb.njournal_buckets);

		/*
		 * Read journal buckets ordered by golden ratio hash to quickly
		 * find a sequence of buckets with valid journal entries
		 */
		for (i = 0; i < ca->sb.njournal_buckets; i++) {
			/*
			 * We must try index 0 first (i == 0 gives l == 0), for
			 * correctness: the journal buckets form a circular
			 * buffer which might have wrapped around.
			 */
			l = (i * 2654435769U) % ca->sb.njournal_buckets;

			if (test_bit(l, bitmap))
				break;

			if (read_bucket(l))
				goto bsearch;
		}

		/*
		 * If that fails, check all the buckets we haven't checked
		 * already
		 */
		pr_debug("falling back to linear search");

		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
		     l < ca->sb.njournal_buckets;
		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets, l + 1))
			if (read_bucket(l))
				goto bsearch;

		/* no journal entries on this device? */
		if (l == ca->sb.njournal_buckets)
			continue;
bsearch:
		BUG_ON(list_empty(list));

		/* Binary search */
		m = l;
		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
		pr_debug("starting binary search, l %u r %u", l, r);

		while (l + 1 < r) {
			seq = list_entry(list->prev, struct journal_replay,
					 list)->j.seq;

			m = (l + r) >> 1;
			read_bucket(m);

			if (seq != list_entry(list->prev, struct journal_replay,
					      list)->j.seq)
				l = m;
			else
				r = m;
		}

		/*
		 * Read buckets in reverse order until we stop finding more
		 * journal entries
		 */
		pr_debug("finishing up: m %u njournal_buckets %u",
			 m, ca->sb.njournal_buckets);
		l = m;

		while (1) {
			if (!l--)
				l = ca->sb.njournal_buckets - 1;

			if (l == m)
				break;

			if (test_bit(l, bitmap))
				continue;

			if (!read_bucket(l))
				break;
		}

		seq = 0;

		for (i = 0; i < ca->sb.njournal_buckets; i++)
			if (ja->seq[i] > seq) {
				seq = ja->seq[i];
				/*
				 * When journal_reclaim() goes to allocate for
				 * the first time, it'll use the bucket after
				 * ja->cur_idx
				 */
				ja->cur_idx = i;
				ja->last_idx = ja->discard_idx = (i + 1) %
					ca->sb.njournal_buckets;
			}
	}

	if (!list_empty(list))
		c->journal.seq = list_entry(list->prev,
					    struct journal_replay,
					    list)->j.seq;

	return 0;
#undef read_bucket
}
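/*
 * Mark the keys from every journal entry before replay: bump the pin count on
 * each bucket the keys point into, feed the keys to the initial GC mark, and
 * give each entry a refcount in journal.pin so its journal bucket isn't
 * reused until the entry has been replayed.
 */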
void bch_journal_mark(struct cache_set *c, struct list_head *list)
{
	atomic_t p = { 0 };
	struct bkey *k;
	struct journal_replay *i;
	struct journal *j = &c->journal;
	uint64_t last = j->seq;

	/*
	 * journal.pin should never fill up - we never write a journal
	 * entry when it would fill up. But if for some reason it does, we
	 * iterate over the list in reverse order so that we can just skip that
	 * refcount instead of bugging.
	 */

	list_for_each_entry_reverse(i, list, list) {
		BUG_ON(last < i->j.seq);
		i->pin = NULL;

		while (last-- != i->j.seq)
			if (fifo_free(&j->pin) > 1) {
				fifo_push_front(&j->pin, p);
				atomic_set(&fifo_front(&j->pin), 0);
			}

		if (fifo_free(&j->pin) > 1) {
			fifo_push_front(&j->pin, p);
			i->pin = &fifo_front(&j->pin);
			atomic_set(i->pin, 1);
		}

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k))
			if (!__bch_extent_invalid(c, k)) {
				unsigned j;

				for (j = 0; j < KEY_PTRS(k); j++)
					if (ptr_available(c, k, j))
						atomic_inc(&PTR_BUCKET(c, k, j)->pin);

				bch_initial_mark_key(c, 0, k);
			}
	}
}
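/*
 * Re-insert the keys from each journal entry into the btree, in order, then
 * drop the entry's journal pin; the replay list is freed before returning.
 */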
int bch_journal_replay(struct cache_set *s, struct list_head *list)
{
	int ret = 0, keys = 0, entries = 0;
	struct bkey *k;
	struct journal_replay *i =
		list_entry(list->prev, struct journal_replay, list);

	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
	struct keylist keylist;

	list_for_each_entry(i, list, list) {
		BUG_ON(i->pin && atomic_read(i->pin) != 1);

		cache_set_err_on(n != i->j.seq, s,
"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
				 n, i->j.seq - 1, start, end);

		for (k = i->j.start;
		     k < bset_bkey_last(&i->j);
		     k = bkey_next(k)) {
			trace_bcache_journal_replay_key(k);

			bch_keylist_init_single(&keylist, k);

			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
			if (ret)
				goto err;

			BUG_ON(!bch_keylist_empty(&keylist));
			keys++;

			cond_resched();
		}

		if (i->pin)
			atomic_dec(i->pin);
		n = i->j.seq + 1;
		entries++;
	}

	pr_info("journal replay done, %i keys in %i entries, seq %llu",
		keys, entries, end);
err:
	while (!list_empty(list)) {
		i = list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kfree(i);
	}

	return ret;
}
/* Journalling */

#define journal_max_cmp(l, r) \
	(fifo_idx(&c->journal.pin, btree_current_write(l)->journal) < \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
#define journal_min_cmp(l, r) \
	(fifo_idx(&c->journal.pin, btree_current_write(l)->journal) > \
	 fifo_idx(&(c)->journal.pin, btree_current_write(r)->journal))
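/*
 * These comparators order btree nodes by where their journal pin sits in the
 * journal pin fifo, i.e. by the age of the oldest journal entry they still
 * hold unwritten keys for; btree_flush_write() uses them to pick the node
 * that is pinning the oldest journal entry.
 */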
static void btree_flush_write(struct cache_set *c)
{
	/*
	 * Try to find the btree node that references the oldest journal
	 * entry, and write it out.
	 */
	struct btree *b;
	int i;

	atomic_long_inc(&c->flush_write);

retry:
	spin_lock(&c->journal.lock);
	if (heap_empty(&c->flush_btree)) {
		for_each_cached_btree(b, c, i)
			if (btree_current_write(b)->journal) {
				if (!heap_full(&c->flush_btree))
					heap_add(&c->flush_btree, b,
						 journal_max_cmp);
				else if (journal_max_cmp(b,
					 heap_peek(&c->flush_btree))) {
					c->flush_btree.data[0] = b;
					heap_sift(&c->flush_btree, 0,
						  journal_max_cmp);
				}
			}

		for (i = c->flush_btree.used / 2 - 1; i >= 0; --i)
			heap_sift(&c->flush_btree, i, journal_min_cmp);
	}

	b = NULL;
	heap_pop(&c->flush_btree, b, journal_min_cmp);
	spin_unlock(&c->journal.lock);

	if (b) {
		mutex_lock(&b->write_lock);
		if (!btree_current_write(b)->journal) {
			mutex_unlock(&b->write_lock);
			/* We raced */
			atomic_long_inc(&c->retry_flush_write);
			goto retry;
		}

		__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}
}
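/*
 * j->seq is the sequence number of the newest journal entry, and one pin is
 * held per open entry, so the oldest entry still pinned has sequence number
 * j->seq - fifo_used(&j->pin) + 1.
 */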
#define last_seq(j)	((j)->seq - fifo_used(&(j)->pin) + 1)

static void journal_discard_endio(struct bio *bio)
{
	struct journal_device *ja =
		container_of(bio, struct journal_device, discard_bio);
	struct cache *ca = container_of(ja, struct cache, journal);

	atomic_set(&ja->discard_in_flight, DISCARD_DONE);

	closure_wake_up(&ca->set->journal.wait);
	closure_put(&ca->set->cl);
}
static void journal_discard_work(struct work_struct *work)
{
	struct journal_device *ja =
		container_of(work, struct journal_device, discard_work);

	submit_bio(&ja->discard_bio);
}

static void do_journal_discard(struct cache *ca)
{
	struct journal_device *ja = &ca->journal;
	struct bio *bio = &ja->discard_bio;

	if (!ca->discard) {
		ja->discard_idx = ja->last_idx;
		return;
	}

	switch (atomic_read(&ja->discard_in_flight)) {
	case DISCARD_IN_FLIGHT:
		return;

	case DISCARD_DONE:
		ja->discard_idx = (ja->discard_idx + 1) %
			ca->sb.njournal_buckets;

		atomic_set(&ja->discard_in_flight, DISCARD_READY);
		/* fallthrough */

	case DISCARD_READY:
		if (ja->discard_idx == ja->last_idx)
			return;

		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);

		bio_init(bio, bio->bi_inline_vecs, 1);
		bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
						ca->sb.d[ja->discard_idx]);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= bucket_bytes(ca);
		bio->bi_end_io		= journal_discard_endio;

		closure_get(&ca->set->cl);
		INIT_WORK(&ja->discard_work, journal_discard_work);
		schedule_work(&ja->discard_work);
	}
}
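/*
 * Free up journal space: drop pins whose entries are fully flushed, advance
 * each device's last_idx past buckets older than the new last_seq, kick off
 * discards, and if the journal is out of blocks move each device to its next
 * bucket and rebuild journal.key to point at the new buckets.
 */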
static void journal_reclaim(struct cache_set *c)
{
	struct bkey *k = &c->journal.key;
	struct cache *ca;
	uint64_t last_seq;
	unsigned iter, n = 0;
	atomic_t p __maybe_unused;

	atomic_long_inc(&c->reclaim);

	while (!atomic_read(&fifo_front(&c->journal.pin)))
		fifo_pop(&c->journal.pin, p);

	last_seq = last_seq(&c->journal);

	/* Update last_idx */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;

		while (ja->last_idx != ja->cur_idx &&
		       ja->seq[ja->last_idx] < last_seq)
			ja->last_idx = (ja->last_idx + 1) %
				ca->sb.njournal_buckets;
	}

	for_each_cache(ca, c, iter)
		do_journal_discard(ca);

	if (c->journal.blocks_free)
		goto out;

	/*
	 * Allocate:
	 * XXX: Sort by free journal space
	 */

	for_each_cache(ca, c, iter) {
		struct journal_device *ja = &ca->journal;
		unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;

		/* No space available on this device */
		if (next == ja->discard_idx)
			continue;

		ja->cur_idx = next;
		k->ptr[n++] = MAKE_PTR(0,
				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
				  ca->sb.nr_this_dev);
	}

	bkey_init(k);
	SET_KEY_PTRS(k, n);

	if (n)
		c->journal.blocks_free = c->sb.bucket_size >> c->block_bits;
out:
	if (!journal_full(&c->journal))
		__closure_wake_up(&c->journal.wait);
}
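/*
 * Start the next journal entry: switch to the other in-memory journal_write
 * buffer, push a fresh pin for the new sequence number, and reset the
 * buffer's header fields.
 */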
void bch_journal_next(struct journal *j)
{
	atomic_t p = { 1 };

	j->cur = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	/*
	 * The fifo_push() needs to happen at the same time as j->seq is
	 * incremented for last_seq() to be calculated correctly
	 */
	BUG_ON(!fifo_push(&j->pin, p));
	atomic_set(&fifo_back(&j->pin), 1);

	j->cur->data->seq	= ++j->seq;
	j->cur->dirty		= false;
	j->cur->need_write	= false;
	j->cur->data->keys	= 0;

	if (fifo_full(&j->pin))
		pr_debug("journal_pin full (%zu)", fifo_used(&j->pin));
}
static void journal_write_endio(struct bio *bio)
{
	struct journal_write *w = bio->bi_private;

	cache_set_err_on(bio->bi_status, w->c, "journal io error");
	closure_put(&w->c->journal.io);
}

static void journal_write(struct closure *);

static void journal_write_done(struct closure *cl)
{
	struct journal *j = container_of(cl, struct journal, io);
	struct journal_write *w = (j->cur == j->w)
		? &j->w[1]
		: &j->w[0];

	__closure_wake_up(&w->wait);
	continue_at_nobarrier(cl, journal_write, system_wq);
}

static void journal_write_unlock(struct closure *cl)
	__releases(&c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	c->journal.io_in_flight = 0;
	spin_unlock(&c->journal.lock);
}
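/*
 * Write out the current journal entry; called with journal.lock held, which
 * it drops. Fills in the jset header (btree root, uuid bucket, prio buckets,
 * magic, last_seq, checksum) and submits one write per pointer in
 * journal.key, i.e. one copy per cache device with journal space allocated.
 */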
static void journal_write_unlocked(struct closure *cl)
	__releases(c->journal.lock)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
	struct cache *ca;
	struct journal_write *w = c->journal.cur;
	struct bkey *k = &c->journal.key;
	unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
		c->sb.block_size;

	struct bio *bio;
	struct bio_list list;
	bio_list_init(&list);

	if (!w->need_write) {
		closure_return_with_destructor(cl, journal_write_unlock);
		return;
	} else if (journal_full(&c->journal)) {
		journal_reclaim(c);
		spin_unlock(&c->journal.lock);

		btree_flush_write(c);
		continue_at(cl, journal_write, system_wq);
		return;
	}

	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

	w->data->btree_level = c->root->level;

	bkey_copy(&w->data->btree_root, &c->root->key);
	bkey_copy(&w->data->uuid_bucket, &c->uuid_bucket);

	for_each_cache(ca, c, i)
		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];

	w->data->magic		= jset_magic(&c->sb);
	w->data->version	= BCACHE_JSET_VERSION;
	w->data->last_seq	= last_seq(&c->journal);
	w->data->csum		= csum_set(w->data);

	for (i = 0; i < KEY_PTRS(k); i++) {
		ca = PTR_CACHE(c, k, i);
		bio = &ca->journal.bio;

		atomic_long_add(sectors, &ca->meta_sectors_written);

		bio_reset(bio);
		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
		bio_set_dev(bio, ca->bdev);
		bio->bi_iter.bi_size	= sectors << 9;

		bio->bi_end_io	= journal_write_endio;
		bio->bi_private = w;
		bio_set_op_attrs(bio, REQ_OP_WRITE,
				 REQ_SYNC|REQ_META|REQ_PREFLUSH|REQ_FUA);
		bch_bio_map(bio, w->data);

		trace_bcache_journal_write(bio);
		bio_list_add(&list, bio);

		SET_PTR_OFFSET(k, i, PTR_OFFSET(k, i) + sectors);

		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
	}

	atomic_dec_bug(&fifo_back(&c->journal.pin));
	bch_journal_next(&c->journal);
	journal_reclaim(c);

	spin_unlock(&c->journal.lock);

	while ((bio = bio_list_pop(&list)))
		closure_bio_submit(c, bio, cl);

	continue_at(cl, journal_write_done, NULL);
}
static void journal_write(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, journal.io);

	spin_lock(&c->journal.lock);
	journal_write_unlocked(cl);
}

static void journal_try_write(struct cache_set *c)
	__releases(c->journal.lock)
{
	struct closure *cl = &c->journal.io;
	struct journal_write *w = c->journal.cur;

	w->need_write = true;

	if (!c->journal.io_in_flight) {
		c->journal.io_in_flight = 1;
		closure_call(cl, journal_write_unlocked, NULL, &c->cl);
	} else {
		spin_unlock(&c->journal.lock);
	}
}
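/*
 * Wait until the current journal entry has room for another nkeys keys,
 * kicking off journal writes, reclaim and btree flushes as needed along the
 * way; returns with journal.lock held.
 */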
static struct journal_write *journal_wait_for_write(struct cache_set *c,
						    unsigned nkeys)
	__acquires(&c->journal.lock)
{
	size_t sectors;
	struct closure cl;
	bool wait = false;

	closure_init_stack(&cl);

	spin_lock(&c->journal.lock);

	while (1) {
		struct journal_write *w = c->journal.cur;

		sectors = __set_blocks(w->data, w->data->keys + nkeys,
				       block_bytes(c)) * c->sb.block_size;

		if (sectors <= min_t(size_t,
				     c->journal.blocks_free * c->sb.block_size,
				     PAGE_SECTORS << JSET_BITS))
			return w;

		if (wait)
			closure_wait(&c->journal.wait, &cl);

		if (!journal_full(&c->journal)) {
			if (wait)
				trace_bcache_journal_entry_full(c);

			/*
			 * XXX: If we were inserting so many keys that they
			 * won't fit in an _empty_ journal write, we'll
			 * deadlock. For now, handle this in
			 * bch_keylist_realloc() - but something to think about.
			 */
			BUG_ON(!w->data->keys);

			journal_try_write(c); /* unlocks */
		} else {
			if (wait)
				trace_bcache_journal_full(c);

			journal_reclaim(c);
			spin_unlock(&c->journal.lock);

			btree_flush_write(c);
		}

		closure_sync(&cl);
		spin_lock(&c->journal.lock);
		wait = true;
	}
}
static void journal_write_work(struct work_struct *work)
{
	struct cache_set *c = container_of(to_delayed_work(work),
					   struct cache_set,
					   journal.work);
	spin_lock(&c->journal.lock);
	if (c->journal.cur->dirty)
		journal_try_write(c);
	else
		spin_unlock(&c->journal.lock);
}
/*
 * Entry point to the journalling code - bio_insert() and btree_invalidate()
 * pass bch_journal() a list of keys to be journalled, and then bch_journal()
 * hands those same keys off to btree_insert_async()
 */
atomic_t *bch_journal(struct cache_set *c,
		      struct keylist *keys,
		      struct closure *parent)
{
	struct journal_write *w;
	atomic_t *ret;

	if (!CACHE_SYNC(&c->sb))
		return NULL;

	w = journal_wait_for_write(c, bch_keylist_nkeys(keys));

	memcpy(bset_bkey_last(w->data), keys->keys, bch_keylist_bytes(keys));
	w->data->keys += bch_keylist_nkeys(keys);

	ret = &fifo_back(&c->journal.pin);
	atomic_inc(ret);

	if (parent) {
		closure_wait(&w->wait, parent);
		journal_try_write(c);
	} else if (!w->dirty) {
		w->dirty = true;
		schedule_delayed_work(&c->journal.work,
				      msecs_to_jiffies(c->journal_delay_ms));
		spin_unlock(&c->journal.lock);
	} else {
		spin_unlock(&c->journal.lock);
	}

	return ret;
}
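/*
 * Journal an empty keylist: since every journal entry also records the
 * current btree root, uuid bucket and prio buckets, this is how metadata-only
 * updates get persisted to the journal.
 */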
void bch_journal_meta(struct cache_set *c, struct closure *cl)
{
	struct keylist keys;
	atomic_t *ref;

	bch_keylist_init(&keys);

	ref = bch_journal(c, &keys, cl);
	if (ref)
		atomic_dec_bug(ref);
}

void bch_journal_free(struct cache_set *c)
{
	free_pages((unsigned long) c->journal.w[1].data, JSET_BITS);
	free_pages((unsigned long) c->journal.w[0].data, JSET_BITS);
	free_fifo(&c->journal.pin);
}

int bch_journal_alloc(struct cache_set *c)
{
	struct journal *j = &c->journal;

	spin_lock_init(&j->lock);
	INIT_DELAYED_WORK(&j->work, journal_write_work);

	c->journal_delay_ms = 100;

	j->w[0].c = c;
	j->w[1].c = c;

	if (!(init_heap(&c->flush_btree, 128, GFP_KERNEL)) ||
	    !(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
	    !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
	    !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
		return -ENOMEM;

	return 0;
}