super.c 50 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121
  1. /*
  2. * bcache setup/teardown code, and some metadata io - read a superblock and
  3. * figure out what to do with it.
  4. *
  5. * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
  6. * Copyright 2012 Google, Inc.
  7. */
  8. #include "bcache.h"
  9. #include "btree.h"
  10. #include "debug.h"
  11. #include "extents.h"
  12. #include "request.h"
  13. #include "writeback.h"
  14. #include <linux/blkdev.h>
  15. #include <linux/buffer_head.h>
  16. #include <linux/debugfs.h>
  17. #include <linux/genhd.h>
  18. #include <linux/idr.h>
  19. #include <linux/kthread.h>
  20. #include <linux/module.h>
  21. #include <linux/random.h>
  22. #include <linux/reboot.h>
  23. #include <linux/sysfs.h>
  24. MODULE_LICENSE("GPL");
  25. MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
  26. static const char bcache_magic[] = {
  27. 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
  28. 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
  29. };
  30. static const char invalid_uuid[] = {
  31. 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
  32. 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
  33. };
  34. /* Default is -1; we skip past it for struct cached_dev's cache mode */
  35. const char * const bch_cache_modes[] = {
  36. "default",
  37. "writethrough",
  38. "writeback",
  39. "writearound",
  40. "none",
  41. NULL
  42. };
  43. static struct kobject *bcache_kobj;
  44. struct mutex bch_register_lock;
  45. LIST_HEAD(bch_cache_sets);
  46. static LIST_HEAD(uncached_devices);
  47. static int bcache_major;
  48. static DEFINE_IDA(bcache_minor);
  49. static wait_queue_head_t unregister_wait;
  50. struct workqueue_struct *bcache_wq;
  51. #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
  52. #define BCACHE_MINORS 16 /* partition support */
  53. /* Superblock */
  54. static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
  55. struct page **res)
  56. {
  57. const char *err;
  58. struct cache_sb *s;
  59. struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
  60. unsigned i;
  61. if (!bh)
  62. return "IO error";
  63. s = (struct cache_sb *) bh->b_data;
  64. sb->offset = le64_to_cpu(s->offset);
  65. sb->version = le64_to_cpu(s->version);
  66. memcpy(sb->magic, s->magic, 16);
  67. memcpy(sb->uuid, s->uuid, 16);
  68. memcpy(sb->set_uuid, s->set_uuid, 16);
  69. memcpy(sb->label, s->label, SB_LABEL_SIZE);
  70. sb->flags = le64_to_cpu(s->flags);
  71. sb->seq = le64_to_cpu(s->seq);
  72. sb->last_mount = le32_to_cpu(s->last_mount);
  73. sb->first_bucket = le16_to_cpu(s->first_bucket);
  74. sb->keys = le16_to_cpu(s->keys);
  75. for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
  76. sb->d[i] = le64_to_cpu(s->d[i]);
  77. pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
  78. sb->version, sb->flags, sb->seq, sb->keys);
  79. err = "Not a bcache superblock";
  80. if (sb->offset != SB_SECTOR)
  81. goto err;
  82. if (memcmp(sb->magic, bcache_magic, 16))
  83. goto err;
  84. err = "Too many journal buckets";
  85. if (sb->keys > SB_JOURNAL_BUCKETS)
  86. goto err;
  87. err = "Bad checksum";
  88. if (s->csum != csum_set(s))
  89. goto err;
  90. err = "Bad UUID";
  91. if (bch_is_zero(sb->uuid, 16))
  92. goto err;
  93. sb->block_size = le16_to_cpu(s->block_size);
  94. err = "Superblock block size smaller than device block size";
  95. if (sb->block_size << 9 < bdev_logical_block_size(bdev))
  96. goto err;
  97. switch (sb->version) {
  98. case BCACHE_SB_VERSION_BDEV:
  99. sb->data_offset = BDEV_DATA_START_DEFAULT;
  100. break;
  101. case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
  102. sb->data_offset = le64_to_cpu(s->data_offset);
  103. err = "Bad data offset";
  104. if (sb->data_offset < BDEV_DATA_START_DEFAULT)
  105. goto err;
  106. break;
  107. case BCACHE_SB_VERSION_CDEV:
  108. case BCACHE_SB_VERSION_CDEV_WITH_UUID:
  109. sb->nbuckets = le64_to_cpu(s->nbuckets);
  110. sb->bucket_size = le16_to_cpu(s->bucket_size);
  111. sb->nr_in_set = le16_to_cpu(s->nr_in_set);
  112. sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
  113. err = "Too many buckets";
  114. if (sb->nbuckets > LONG_MAX)
  115. goto err;
  116. err = "Not enough buckets";
  117. if (sb->nbuckets < 1 << 7)
  118. goto err;
  119. err = "Bad block/bucket size";
  120. if (!is_power_of_2(sb->block_size) ||
  121. sb->block_size > PAGE_SECTORS ||
  122. !is_power_of_2(sb->bucket_size) ||
  123. sb->bucket_size < PAGE_SECTORS)
  124. goto err;
  125. err = "Invalid superblock: device too small";
  126. if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
  127. goto err;
  128. err = "Bad UUID";
  129. if (bch_is_zero(sb->set_uuid, 16))
  130. goto err;
  131. err = "Bad cache device number in set";
  132. if (!sb->nr_in_set ||
  133. sb->nr_in_set <= sb->nr_this_dev ||
  134. sb->nr_in_set > MAX_CACHES_PER_SET)
  135. goto err;
  136. err = "Journal buckets not sequential";
  137. for (i = 0; i < sb->keys; i++)
  138. if (sb->d[i] != sb->first_bucket + i)
  139. goto err;
  140. err = "Too many journal buckets";
  141. if (sb->first_bucket + sb->keys > sb->nbuckets)
  142. goto err;
  143. err = "Invalid superblock: first bucket comes before end of super";
  144. if (sb->first_bucket * sb->bucket_size < 16)
  145. goto err;
  146. break;
  147. default:
  148. err = "Unsupported superblock version";
  149. goto err;
  150. }
  151. sb->last_mount = get_seconds();
  152. err = NULL;
  153. get_page(bh->b_page);
  154. *res = bh->b_page;
  155. err:
  156. put_bh(bh);
  157. return err;
  158. }
  159. static void write_bdev_super_endio(struct bio *bio)
  160. {
  161. struct cached_dev *dc = bio->bi_private;
  162. /* XXX: error checking */
  163. closure_put(&dc->sb_write);
  164. }
  165. static void __write_super(struct cache_sb *sb, struct bio *bio)
  166. {
  167. struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
  168. unsigned i;
  169. bio->bi_iter.bi_sector = SB_SECTOR;
  170. bio->bi_iter.bi_size = SB_SIZE;
  171. bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
  172. bch_bio_map(bio, NULL);
  173. out->offset = cpu_to_le64(sb->offset);
  174. out->version = cpu_to_le64(sb->version);
  175. memcpy(out->uuid, sb->uuid, 16);
  176. memcpy(out->set_uuid, sb->set_uuid, 16);
  177. memcpy(out->label, sb->label, SB_LABEL_SIZE);
  178. out->flags = cpu_to_le64(sb->flags);
  179. out->seq = cpu_to_le64(sb->seq);
  180. out->last_mount = cpu_to_le32(sb->last_mount);
  181. out->first_bucket = cpu_to_le16(sb->first_bucket);
  182. out->keys = cpu_to_le16(sb->keys);
  183. for (i = 0; i < sb->keys; i++)
  184. out->d[i] = cpu_to_le64(sb->d[i]);
  185. out->csum = csum_set(out);
  186. pr_debug("ver %llu, flags %llu, seq %llu",
  187. sb->version, sb->flags, sb->seq);
  188. submit_bio(bio);
  189. }
  190. static void bch_write_bdev_super_unlock(struct closure *cl)
  191. {
  192. struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
  193. up(&dc->sb_write_mutex);
  194. }
  195. void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
  196. {
  197. struct closure *cl = &dc->sb_write;
  198. struct bio *bio = &dc->sb_bio;
  199. down(&dc->sb_write_mutex);
  200. closure_init(cl, parent);
  201. bio_reset(bio);
  202. bio->bi_bdev = dc->bdev;
  203. bio->bi_end_io = write_bdev_super_endio;
  204. bio->bi_private = dc;
  205. closure_get(cl);
  206. __write_super(&dc->sb, bio);
  207. closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
  208. }
  209. static void write_super_endio(struct bio *bio)
  210. {
  211. struct cache *ca = bio->bi_private;
  212. bch_count_io_errors(ca, bio->bi_error, "writing superblock");
  213. closure_put(&ca->set->sb_write);
  214. }
  215. static void bcache_write_super_unlock(struct closure *cl)
  216. {
  217. struct cache_set *c = container_of(cl, struct cache_set, sb_write);
  218. up(&c->sb_write_mutex);
  219. }
  220. void bcache_write_super(struct cache_set *c)
  221. {
  222. struct closure *cl = &c->sb_write;
  223. struct cache *ca;
  224. unsigned i;
  225. down(&c->sb_write_mutex);
  226. closure_init(cl, &c->cl);
  227. c->sb.seq++;
  228. for_each_cache(ca, c, i) {
  229. struct bio *bio = &ca->sb_bio;
  230. ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
  231. ca->sb.seq = c->sb.seq;
  232. ca->sb.last_mount = c->sb.last_mount;
  233. SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
  234. bio_reset(bio);
  235. bio->bi_bdev = ca->bdev;
  236. bio->bi_end_io = write_super_endio;
  237. bio->bi_private = ca;
  238. closure_get(cl);
  239. __write_super(&ca->sb, bio);
  240. }
  241. closure_return_with_destructor(cl, bcache_write_super_unlock);
  242. }
  243. /* UUID io */
  244. static void uuid_endio(struct bio *bio)
  245. {
  246. struct closure *cl = bio->bi_private;
  247. struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
  248. cache_set_err_on(bio->bi_error, c, "accessing uuids");
  249. bch_bbio_free(bio, c);
  250. closure_put(cl);
  251. }
  252. static void uuid_io_unlock(struct closure *cl)
  253. {
  254. struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
  255. up(&c->uuid_write_mutex);
  256. }
  257. static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
  258. struct bkey *k, struct closure *parent)
  259. {
  260. struct closure *cl = &c->uuid_write;
  261. struct uuid_entry *u;
  262. unsigned i;
  263. char buf[80];
  264. BUG_ON(!parent);
  265. down(&c->uuid_write_mutex);
  266. closure_init(cl, parent);
  267. for (i = 0; i < KEY_PTRS(k); i++) {
  268. struct bio *bio = bch_bbio_alloc(c);
  269. bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
  270. bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
  271. bio->bi_end_io = uuid_endio;
  272. bio->bi_private = cl;
  273. bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
  274. bch_bio_map(bio, c->uuids);
  275. bch_submit_bbio(bio, c, k, i);
  276. if (op != REQ_OP_WRITE)
  277. break;
  278. }
  279. bch_extent_to_text(buf, sizeof(buf), k);
  280. pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
  281. for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
  282. if (!bch_is_zero(u->uuid, 16))
  283. pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
  284. u - c->uuids, u->uuid, u->label,
  285. u->first_reg, u->last_reg, u->invalidated);
  286. closure_return_with_destructor(cl, uuid_io_unlock);
  287. }
  288. static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
  289. {
  290. struct bkey *k = &j->uuid_bucket;
  291. if (__bch_btree_ptr_invalid(c, k))
  292. return "bad uuid pointer";
  293. bkey_copy(&c->uuid_bucket, k);
  294. uuid_io(c, REQ_OP_READ, 0, k, cl);
  295. if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
  296. struct uuid_entry_v0 *u0 = (void *) c->uuids;
  297. struct uuid_entry *u1 = (void *) c->uuids;
  298. int i;
  299. closure_sync(cl);
  300. /*
  301. * Since the new uuid entry is bigger than the old, we have to
  302. * convert starting at the highest memory address and work down
  303. * in order to do it in place
  304. */
  305. for (i = c->nr_uuids - 1;
  306. i >= 0;
  307. --i) {
  308. memcpy(u1[i].uuid, u0[i].uuid, 16);
  309. memcpy(u1[i].label, u0[i].label, 32);
  310. u1[i].first_reg = u0[i].first_reg;
  311. u1[i].last_reg = u0[i].last_reg;
  312. u1[i].invalidated = u0[i].invalidated;
  313. u1[i].flags = 0;
  314. u1[i].sectors = 0;
  315. }
  316. }
  317. return NULL;
  318. }
  319. static int __uuid_write(struct cache_set *c)
  320. {
  321. BKEY_PADDED(key) k;
  322. struct closure cl;
  323. closure_init_stack(&cl);
  324. lockdep_assert_held(&bch_register_lock);
  325. if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
  326. return 1;
  327. SET_KEY_SIZE(&k.key, c->sb.bucket_size);
  328. uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
  329. closure_sync(&cl);
  330. bkey_copy(&c->uuid_bucket, &k.key);
  331. bkey_put(c, &k.key);
  332. return 0;
  333. }
  334. int bch_uuid_write(struct cache_set *c)
  335. {
  336. int ret = __uuid_write(c);
  337. if (!ret)
  338. bch_journal_meta(c, NULL);
  339. return ret;
  340. }
  341. static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
  342. {
  343. struct uuid_entry *u;
  344. for (u = c->uuids;
  345. u < c->uuids + c->nr_uuids; u++)
  346. if (!memcmp(u->uuid, uuid, 16))
  347. return u;
  348. return NULL;
  349. }
  350. static struct uuid_entry *uuid_find_empty(struct cache_set *c)
  351. {
  352. static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
  353. return uuid_find(c, zero_uuid);
  354. }
  355. /*
  356. * Bucket priorities/gens:
  357. *
  358. * For each bucket, we store on disk its
  359. * 8 bit gen
  360. * 16 bit priority
  361. *
  362. * See alloc.c for an explanation of the gen. The priority is used to implement
  363. * lru (and in the future other) cache replacement policies; for most purposes
  364. * it's just an opaque integer.
  365. *
  366. * The gens and the priorities don't have a whole lot to do with each other, and
  367. * it's actually the gens that must be written out at specific times - it's no
  368. * big deal if the priorities don't get written, if we lose them we just reuse
  369. * buckets in suboptimal order.
  370. *
  371. * On disk they're stored in a packed array, and in as many buckets are required
  372. * to fit them all. The buckets we use to store them form a list; the journal
  373. * header points to the first bucket, the first bucket points to the second
  374. * bucket, et cetera.
  375. *
  376. * This code is used by the allocation code; periodically (whenever it runs out
  377. * of buckets to allocate from) the allocation code will invalidate some
  378. * buckets, but it can't use those buckets until their new gens are safely on
  379. * disk.
  380. */
  381. static void prio_endio(struct bio *bio)
  382. {
  383. struct cache *ca = bio->bi_private;
  384. cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
  385. bch_bbio_free(bio, ca->set);
  386. closure_put(&ca->prio);
  387. }
  388. static void prio_io(struct cache *ca, uint64_t bucket, int op,
  389. unsigned long op_flags)
  390. {
  391. struct closure *cl = &ca->prio;
  392. struct bio *bio = bch_bbio_alloc(ca->set);
  393. closure_init_stack(cl);
  394. bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
  395. bio->bi_bdev = ca->bdev;
  396. bio->bi_iter.bi_size = bucket_bytes(ca);
  397. bio->bi_end_io = prio_endio;
  398. bio->bi_private = ca;
  399. bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
  400. bch_bio_map(bio, ca->disk_buckets);
  401. closure_bio_submit(bio, &ca->prio);
  402. closure_sync(cl);
  403. }
  404. void bch_prio_write(struct cache *ca)
  405. {
  406. int i;
  407. struct bucket *b;
  408. struct closure cl;
  409. closure_init_stack(&cl);
  410. lockdep_assert_held(&ca->set->bucket_lock);
  411. ca->disk_buckets->seq++;
  412. atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
  413. &ca->meta_sectors_written);
  414. //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
  415. // fifo_used(&ca->free_inc), fifo_used(&ca->unused));
  416. for (i = prio_buckets(ca) - 1; i >= 0; --i) {
  417. long bucket;
  418. struct prio_set *p = ca->disk_buckets;
  419. struct bucket_disk *d = p->data;
  420. struct bucket_disk *end = d + prios_per_bucket(ca);
  421. for (b = ca->buckets + i * prios_per_bucket(ca);
  422. b < ca->buckets + ca->sb.nbuckets && d < end;
  423. b++, d++) {
  424. d->prio = cpu_to_le16(b->prio);
  425. d->gen = b->gen;
  426. }
  427. p->next_bucket = ca->prio_buckets[i + 1];
  428. p->magic = pset_magic(&ca->sb);
  429. p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
  430. bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
  431. BUG_ON(bucket == -1);
  432. mutex_unlock(&ca->set->bucket_lock);
  433. prio_io(ca, bucket, REQ_OP_WRITE, 0);
  434. mutex_lock(&ca->set->bucket_lock);
  435. ca->prio_buckets[i] = bucket;
  436. atomic_dec_bug(&ca->buckets[bucket].pin);
  437. }
  438. mutex_unlock(&ca->set->bucket_lock);
  439. bch_journal_meta(ca->set, &cl);
  440. closure_sync(&cl);
  441. mutex_lock(&ca->set->bucket_lock);
  442. /*
  443. * Don't want the old priorities to get garbage collected until after we
  444. * finish writing the new ones, and they're journalled
  445. */
  446. for (i = 0; i < prio_buckets(ca); i++) {
  447. if (ca->prio_last_buckets[i])
  448. __bch_bucket_free(ca,
  449. &ca->buckets[ca->prio_last_buckets[i]]);
  450. ca->prio_last_buckets[i] = ca->prio_buckets[i];
  451. }
  452. }
  453. static void prio_read(struct cache *ca, uint64_t bucket)
  454. {
  455. struct prio_set *p = ca->disk_buckets;
  456. struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
  457. struct bucket *b;
  458. unsigned bucket_nr = 0;
  459. for (b = ca->buckets;
  460. b < ca->buckets + ca->sb.nbuckets;
  461. b++, d++) {
  462. if (d == end) {
  463. ca->prio_buckets[bucket_nr] = bucket;
  464. ca->prio_last_buckets[bucket_nr] = bucket;
  465. bucket_nr++;
  466. prio_io(ca, bucket, REQ_OP_READ, 0);
  467. if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
  468. pr_warn("bad csum reading priorities");
  469. if (p->magic != pset_magic(&ca->sb))
  470. pr_warn("bad magic reading priorities");
  471. bucket = p->next_bucket;
  472. d = p->data;
  473. }
  474. b->prio = le16_to_cpu(d->prio);
  475. b->gen = b->last_gc = d->gen;
  476. }
  477. }
  478. /* Bcache device */
  479. static int open_dev(struct block_device *b, fmode_t mode)
  480. {
  481. struct bcache_device *d = b->bd_disk->private_data;
  482. if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
  483. return -ENXIO;
  484. closure_get(&d->cl);
  485. return 0;
  486. }
  487. static void release_dev(struct gendisk *b, fmode_t mode)
  488. {
  489. struct bcache_device *d = b->private_data;
  490. closure_put(&d->cl);
  491. }
  492. static int ioctl_dev(struct block_device *b, fmode_t mode,
  493. unsigned int cmd, unsigned long arg)
  494. {
  495. struct bcache_device *d = b->bd_disk->private_data;
  496. return d->ioctl(d, mode, cmd, arg);
  497. }
  498. static const struct block_device_operations bcache_ops = {
  499. .open = open_dev,
  500. .release = release_dev,
  501. .ioctl = ioctl_dev,
  502. .owner = THIS_MODULE,
  503. };
  504. void bcache_device_stop(struct bcache_device *d)
  505. {
  506. if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
  507. closure_queue(&d->cl);
  508. }
  509. static void bcache_device_unlink(struct bcache_device *d)
  510. {
  511. lockdep_assert_held(&bch_register_lock);
  512. if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
  513. unsigned i;
  514. struct cache *ca;
  515. sysfs_remove_link(&d->c->kobj, d->name);
  516. sysfs_remove_link(&d->kobj, "cache");
  517. for_each_cache(ca, d->c, i)
  518. bd_unlink_disk_holder(ca->bdev, d->disk);
  519. }
  520. }
  521. static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
  522. const char *name)
  523. {
  524. unsigned i;
  525. struct cache *ca;
  526. for_each_cache(ca, d->c, i)
  527. bd_link_disk_holder(ca->bdev, d->disk);
  528. snprintf(d->name, BCACHEDEVNAME_SIZE,
  529. "%s%u", name, d->id);
  530. WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
  531. sysfs_create_link(&c->kobj, &d->kobj, d->name),
  532. "Couldn't create device <-> cache set symlinks");
  533. clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
  534. }
  535. static void bcache_device_detach(struct bcache_device *d)
  536. {
  537. lockdep_assert_held(&bch_register_lock);
  538. if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
  539. struct uuid_entry *u = d->c->uuids + d->id;
  540. SET_UUID_FLASH_ONLY(u, 0);
  541. memcpy(u->uuid, invalid_uuid, 16);
  542. u->invalidated = cpu_to_le32(get_seconds());
  543. bch_uuid_write(d->c);
  544. }
  545. bcache_device_unlink(d);
  546. d->c->devices[d->id] = NULL;
  547. closure_put(&d->c->caching);
  548. d->c = NULL;
  549. }
  550. static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
  551. unsigned id)
  552. {
  553. d->id = id;
  554. d->c = c;
  555. c->devices[id] = d;
  556. closure_get(&c->caching);
  557. }
  558. static void bcache_device_free(struct bcache_device *d)
  559. {
  560. lockdep_assert_held(&bch_register_lock);
  561. pr_info("%s stopped", d->disk->disk_name);
  562. if (d->c)
  563. bcache_device_detach(d);
  564. if (d->disk && d->disk->flags & GENHD_FL_UP)
  565. del_gendisk(d->disk);
  566. if (d->disk && d->disk->queue)
  567. blk_cleanup_queue(d->disk->queue);
  568. if (d->disk) {
  569. ida_simple_remove(&bcache_minor, d->disk->first_minor);
  570. put_disk(d->disk);
  571. }
  572. if (d->bio_split)
  573. bioset_free(d->bio_split);
  574. kvfree(d->full_dirty_stripes);
  575. kvfree(d->stripe_sectors_dirty);
  576. closure_debug_destroy(&d->cl);
  577. }
  578. static int bcache_device_init(struct bcache_device *d, unsigned block_size,
  579. sector_t sectors)
  580. {
  581. struct request_queue *q;
  582. size_t n;
  583. int minor;
  584. if (!d->stripe_size)
  585. d->stripe_size = 1 << 31;
  586. d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
  587. if (!d->nr_stripes ||
  588. d->nr_stripes > INT_MAX ||
  589. d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
  590. pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
  591. (unsigned)d->nr_stripes);
  592. return -ENOMEM;
  593. }
  594. n = d->nr_stripes * sizeof(atomic_t);
  595. d->stripe_sectors_dirty = n < PAGE_SIZE << 6
  596. ? kzalloc(n, GFP_KERNEL)
  597. : vzalloc(n);
  598. if (!d->stripe_sectors_dirty)
  599. return -ENOMEM;
  600. n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
  601. d->full_dirty_stripes = n < PAGE_SIZE << 6
  602. ? kzalloc(n, GFP_KERNEL)
  603. : vzalloc(n);
  604. if (!d->full_dirty_stripes)
  605. return -ENOMEM;
  606. minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
  607. if (minor < 0)
  608. return minor;
  609. minor *= BCACHE_MINORS;
  610. if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
  611. !(d->disk = alloc_disk(BCACHE_MINORS))) {
  612. ida_simple_remove(&bcache_minor, minor);
  613. return -ENOMEM;
  614. }
  615. set_capacity(d->disk, sectors);
  616. snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);
  617. d->disk->major = bcache_major;
  618. d->disk->first_minor = minor;
  619. d->disk->fops = &bcache_ops;
  620. d->disk->private_data = d;
  621. q = blk_alloc_queue(GFP_KERNEL);
  622. if (!q)
  623. return -ENOMEM;
  624. blk_queue_make_request(q, NULL);
  625. d->disk->queue = q;
  626. q->queuedata = d;
  627. q->backing_dev_info.congested_data = d;
  628. q->limits.max_hw_sectors = UINT_MAX;
  629. q->limits.max_sectors = UINT_MAX;
  630. q->limits.max_segment_size = UINT_MAX;
  631. q->limits.max_segments = BIO_MAX_PAGES;
  632. blk_queue_max_discard_sectors(q, UINT_MAX);
  633. q->limits.discard_granularity = 512;
  634. q->limits.io_min = block_size;
  635. q->limits.logical_block_size = block_size;
  636. q->limits.physical_block_size = block_size;
  637. set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
  638. clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
  639. set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
  640. blk_queue_write_cache(q, true, true);
  641. return 0;
  642. }
  643. /* Cached device */
  644. static void calc_cached_dev_sectors(struct cache_set *c)
  645. {
  646. uint64_t sectors = 0;
  647. struct cached_dev *dc;
  648. list_for_each_entry(dc, &c->cached_devs, list)
  649. sectors += bdev_sectors(dc->bdev);
  650. c->cached_dev_sectors = sectors;
  651. }
  652. void bch_cached_dev_run(struct cached_dev *dc)
  653. {
  654. struct bcache_device *d = &dc->disk;
  655. char buf[SB_LABEL_SIZE + 1];
  656. char *env[] = {
  657. "DRIVER=bcache",
  658. kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
  659. NULL,
  660. NULL,
  661. };
  662. memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
  663. buf[SB_LABEL_SIZE] = '\0';
  664. env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
  665. if (atomic_xchg(&dc->running, 1)) {
  666. kfree(env[1]);
  667. kfree(env[2]);
  668. return;
  669. }
  670. if (!d->c &&
  671. BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
  672. struct closure cl;
  673. closure_init_stack(&cl);
  674. SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
  675. bch_write_bdev_super(dc, &cl);
  676. closure_sync(&cl);
  677. }
  678. add_disk(d->disk);
  679. bd_link_disk_holder(dc->bdev, dc->disk.disk);
  680. /* won't show up in the uevent file, use udevadm monitor -e instead
  681. * only class / kset properties are persistent */
  682. kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
  683. kfree(env[1]);
  684. kfree(env[2]);
  685. if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
  686. sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
  687. pr_debug("error creating sysfs link");
  688. }
  689. static void cached_dev_detach_finish(struct work_struct *w)
  690. {
  691. struct cached_dev *dc = container_of(w, struct cached_dev, detach);
  692. char buf[BDEVNAME_SIZE];
  693. struct closure cl;
  694. closure_init_stack(&cl);
  695. BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
  696. BUG_ON(atomic_read(&dc->count));
  697. mutex_lock(&bch_register_lock);
  698. memset(&dc->sb.set_uuid, 0, 16);
  699. SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
  700. bch_write_bdev_super(dc, &cl);
  701. closure_sync(&cl);
  702. bcache_device_detach(&dc->disk);
  703. list_move(&dc->list, &uncached_devices);
  704. clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
  705. clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
  706. mutex_unlock(&bch_register_lock);
  707. pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));
  708. /* Drop ref we took in cached_dev_detach() */
  709. closure_put(&dc->disk.cl);
  710. }
  711. void bch_cached_dev_detach(struct cached_dev *dc)
  712. {
  713. lockdep_assert_held(&bch_register_lock);
  714. if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
  715. return;
  716. if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
  717. return;
  718. /*
  719. * Block the device from being closed and freed until we're finished
  720. * detaching
  721. */
  722. closure_get(&dc->disk.cl);
  723. bch_writeback_queue(dc);
  724. cached_dev_put(dc);
  725. }
  726. int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
  727. {
  728. uint32_t rtime = cpu_to_le32(get_seconds());
  729. struct uuid_entry *u;
  730. char buf[BDEVNAME_SIZE];
  731. bdevname(dc->bdev, buf);
  732. if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
  733. return -ENOENT;
  734. if (dc->disk.c) {
  735. pr_err("Can't attach %s: already attached", buf);
  736. return -EINVAL;
  737. }
  738. if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
  739. pr_err("Can't attach %s: shutting down", buf);
  740. return -EINVAL;
  741. }
  742. if (dc->sb.block_size < c->sb.block_size) {
  743. /* Will die */
  744. pr_err("Couldn't attach %s: block size less than set's block size",
  745. buf);
  746. return -EINVAL;
  747. }
  748. u = uuid_find(c, dc->sb.uuid);
  749. if (u &&
  750. (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
  751. BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
  752. memcpy(u->uuid, invalid_uuid, 16);
  753. u->invalidated = cpu_to_le32(get_seconds());
  754. u = NULL;
  755. }
  756. if (!u) {
  757. if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
  758. pr_err("Couldn't find uuid for %s in set", buf);
  759. return -ENOENT;
  760. }
  761. u = uuid_find_empty(c);
  762. if (!u) {
  763. pr_err("Not caching %s, no room for UUID", buf);
  764. return -EINVAL;
  765. }
  766. }
  767. /* Deadlocks since we're called via sysfs...
  768. sysfs_remove_file(&dc->kobj, &sysfs_attach);
  769. */
  770. if (bch_is_zero(u->uuid, 16)) {
  771. struct closure cl;
  772. closure_init_stack(&cl);
  773. memcpy(u->uuid, dc->sb.uuid, 16);
  774. memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
  775. u->first_reg = u->last_reg = rtime;
  776. bch_uuid_write(c);
  777. memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
  778. SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
  779. bch_write_bdev_super(dc, &cl);
  780. closure_sync(&cl);
  781. } else {
  782. u->last_reg = rtime;
  783. bch_uuid_write(c);
  784. }
  785. bcache_device_attach(&dc->disk, c, u - c->uuids);
  786. list_move(&dc->list, &c->cached_devs);
  787. calc_cached_dev_sectors(c);
  788. smp_wmb();
  789. /*
  790. * dc->c must be set before dc->count != 0 - paired with the mb in
  791. * cached_dev_get()
  792. */
  793. atomic_set(&dc->count, 1);
  794. /* Block writeback thread, but spawn it */
  795. down_write(&dc->writeback_lock);
  796. if (bch_cached_dev_writeback_start(dc)) {
  797. up_write(&dc->writeback_lock);
  798. return -ENOMEM;
  799. }
  800. if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
  801. bch_sectors_dirty_init(dc);
  802. atomic_set(&dc->has_dirty, 1);
  803. atomic_inc(&dc->count);
  804. bch_writeback_queue(dc);
  805. }
  806. bch_cached_dev_run(dc);
  807. bcache_device_link(&dc->disk, c, "bdev");
  808. /* Allow the writeback thread to proceed */
  809. up_write(&dc->writeback_lock);
  810. pr_info("Caching %s as %s on set %pU",
  811. bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
  812. dc->disk.c->sb.set_uuid);
  813. return 0;
  814. }
  815. void bch_cached_dev_release(struct kobject *kobj)
  816. {
  817. struct cached_dev *dc = container_of(kobj, struct cached_dev,
  818. disk.kobj);
  819. kfree(dc);
  820. module_put(THIS_MODULE);
  821. }
  822. static void cached_dev_free(struct closure *cl)
  823. {
  824. struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
  825. cancel_delayed_work_sync(&dc->writeback_rate_update);
  826. if (!IS_ERR_OR_NULL(dc->writeback_thread))
  827. kthread_stop(dc->writeback_thread);
  828. mutex_lock(&bch_register_lock);
  829. if (atomic_read(&dc->running))
  830. bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
  831. bcache_device_free(&dc->disk);
  832. list_del(&dc->list);
  833. mutex_unlock(&bch_register_lock);
  834. if (!IS_ERR_OR_NULL(dc->bdev))
  835. blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
  836. wake_up(&unregister_wait);
  837. kobject_put(&dc->disk.kobj);
  838. }
  839. static void cached_dev_flush(struct closure *cl)
  840. {
  841. struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
  842. struct bcache_device *d = &dc->disk;
  843. mutex_lock(&bch_register_lock);
  844. bcache_device_unlink(d);
  845. mutex_unlock(&bch_register_lock);
  846. bch_cache_accounting_destroy(&dc->accounting);
  847. kobject_del(&d->kobj);
  848. continue_at(cl, cached_dev_free, system_wq);
  849. }
  850. static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
  851. {
  852. int ret;
  853. struct io *io;
  854. struct request_queue *q = bdev_get_queue(dc->bdev);
  855. __module_get(THIS_MODULE);
  856. INIT_LIST_HEAD(&dc->list);
  857. closure_init(&dc->disk.cl, NULL);
  858. set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
  859. kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
  860. INIT_WORK(&dc->detach, cached_dev_detach_finish);
  861. sema_init(&dc->sb_write_mutex, 1);
  862. INIT_LIST_HEAD(&dc->io_lru);
  863. spin_lock_init(&dc->io_lock);
  864. bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
  865. dc->sequential_cutoff = 4 << 20;
  866. for (io = dc->io; io < dc->io + RECENT_IO; io++) {
  867. list_add(&io->lru, &dc->io_lru);
  868. hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
  869. }
  870. dc->disk.stripe_size = q->limits.io_opt >> 9;
  871. if (dc->disk.stripe_size)
  872. dc->partial_stripes_expensive =
  873. q->limits.raid_partial_stripes_expensive;
  874. ret = bcache_device_init(&dc->disk, block_size,
  875. dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
  876. if (ret)
  877. return ret;
  878. set_capacity(dc->disk.disk,
  879. dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
  880. dc->disk.disk->queue->backing_dev_info.ra_pages =
  881. max(dc->disk.disk->queue->backing_dev_info.ra_pages,
  882. q->backing_dev_info.ra_pages);
  883. bch_cached_dev_request_init(dc);
  884. bch_cached_dev_writeback_init(dc);
  885. return 0;
  886. }
  887. /* Cached device - bcache superblock */
  888. static void register_bdev(struct cache_sb *sb, struct page *sb_page,
  889. struct block_device *bdev,
  890. struct cached_dev *dc)
  891. {
  892. char name[BDEVNAME_SIZE];
  893. const char *err = "cannot allocate memory";
  894. struct cache_set *c;
  895. memcpy(&dc->sb, sb, sizeof(struct cache_sb));
  896. dc->bdev = bdev;
  897. dc->bdev->bd_holder = dc;
  898. bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
  899. dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
  900. get_page(sb_page);
  901. if (cached_dev_init(dc, sb->block_size << 9))
  902. goto err;
  903. err = "error creating kobject";
  904. if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
  905. "bcache"))
  906. goto err;
  907. if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
  908. goto err;
  909. pr_info("registered backing device %s", bdevname(bdev, name));
  910. list_add(&dc->list, &uncached_devices);
  911. list_for_each_entry(c, &bch_cache_sets, list)
  912. bch_cached_dev_attach(dc, c);
  913. if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
  914. BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
  915. bch_cached_dev_run(dc);
  916. return;
  917. err:
  918. pr_notice("error opening %s: %s", bdevname(bdev, name), err);
  919. bcache_device_stop(&dc->disk);
  920. }
  921. /* Flash only volumes */
  922. void bch_flash_dev_release(struct kobject *kobj)
  923. {
  924. struct bcache_device *d = container_of(kobj, struct bcache_device,
  925. kobj);
  926. kfree(d);
  927. }
  928. static void flash_dev_free(struct closure *cl)
  929. {
  930. struct bcache_device *d = container_of(cl, struct bcache_device, cl);
  931. mutex_lock(&bch_register_lock);
  932. bcache_device_free(d);
  933. mutex_unlock(&bch_register_lock);
  934. kobject_put(&d->kobj);
  935. }
  936. static void flash_dev_flush(struct closure *cl)
  937. {
  938. struct bcache_device *d = container_of(cl, struct bcache_device, cl);
  939. mutex_lock(&bch_register_lock);
  940. bcache_device_unlink(d);
  941. mutex_unlock(&bch_register_lock);
  942. kobject_del(&d->kobj);
  943. continue_at(cl, flash_dev_free, system_wq);
  944. }
  945. static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
  946. {
  947. struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
  948. GFP_KERNEL);
  949. if (!d)
  950. return -ENOMEM;
  951. closure_init(&d->cl, NULL);
  952. set_closure_fn(&d->cl, flash_dev_flush, system_wq);
  953. kobject_init(&d->kobj, &bch_flash_dev_ktype);
  954. if (bcache_device_init(d, block_bytes(c), u->sectors))
  955. goto err;
  956. bcache_device_attach(d, c, u - c->uuids);
  957. bch_flash_dev_request_init(d);
  958. add_disk(d->disk);
  959. if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
  960. goto err;
  961. bcache_device_link(d, c, "volume");
  962. return 0;
  963. err:
  964. kobject_put(&d->kobj);
  965. return -ENOMEM;
  966. }
  967. static int flash_devs_run(struct cache_set *c)
  968. {
  969. int ret = 0;
  970. struct uuid_entry *u;
  971. for (u = c->uuids;
  972. u < c->uuids + c->nr_uuids && !ret;
  973. u++)
  974. if (UUID_FLASH_ONLY(u))
  975. ret = flash_dev_run(c, u);
  976. return ret;
  977. }
  978. int bch_flash_dev_create(struct cache_set *c, uint64_t size)
  979. {
  980. struct uuid_entry *u;
  981. if (test_bit(CACHE_SET_STOPPING, &c->flags))
  982. return -EINTR;
  983. if (!test_bit(CACHE_SET_RUNNING, &c->flags))
  984. return -EPERM;
  985. u = uuid_find_empty(c);
  986. if (!u) {
  987. pr_err("Can't create volume, no room for UUID");
  988. return -EINVAL;
  989. }
  990. get_random_bytes(u->uuid, 16);
  991. memset(u->label, 0, 32);
  992. u->first_reg = u->last_reg = cpu_to_le32(get_seconds());
  993. SET_UUID_FLASH_ONLY(u, 1);
  994. u->sectors = size >> 9;
  995. bch_uuid_write(c);
  996. return flash_dev_run(c, u);
  997. }
  998. /* Cache set */
  999. __printf(2, 3)
  1000. bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
  1001. {
  1002. va_list args;
  1003. if (c->on_error != ON_ERROR_PANIC &&
  1004. test_bit(CACHE_SET_STOPPING, &c->flags))
  1005. return false;
  1006. /* XXX: we can be called from atomic context
  1007. acquire_console_sem();
  1008. */
  1009. printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);
  1010. va_start(args, fmt);
  1011. vprintk(fmt, args);
  1012. va_end(args);
  1013. printk(", disabling caching\n");
  1014. if (c->on_error == ON_ERROR_PANIC)
  1015. panic("panic forced after error\n");
  1016. bch_cache_set_unregister(c);
  1017. return true;
  1018. }
  1019. void bch_cache_set_release(struct kobject *kobj)
  1020. {
  1021. struct cache_set *c = container_of(kobj, struct cache_set, kobj);
  1022. kfree(c);
  1023. module_put(THIS_MODULE);
  1024. }
  1025. static void cache_set_free(struct closure *cl)
  1026. {
  1027. struct cache_set *c = container_of(cl, struct cache_set, cl);
  1028. struct cache *ca;
  1029. unsigned i;
  1030. if (!IS_ERR_OR_NULL(c->debug))
  1031. debugfs_remove(c->debug);
  1032. bch_open_buckets_free(c);
  1033. bch_btree_cache_free(c);
  1034. bch_journal_free(c);
  1035. for_each_cache(ca, c, i)
  1036. if (ca) {
  1037. ca->set = NULL;
  1038. c->cache[ca->sb.nr_this_dev] = NULL;
  1039. kobject_put(&ca->kobj);
  1040. }
  1041. bch_bset_sort_state_free(&c->sort);
  1042. free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
  1043. if (c->moving_gc_wq)
  1044. destroy_workqueue(c->moving_gc_wq);
  1045. if (c->bio_split)
  1046. bioset_free(c->bio_split);
  1047. if (c->fill_iter)
  1048. mempool_destroy(c->fill_iter);
  1049. if (c->bio_meta)
  1050. mempool_destroy(c->bio_meta);
  1051. if (c->search)
  1052. mempool_destroy(c->search);
  1053. kfree(c->devices);
  1054. mutex_lock(&bch_register_lock);
  1055. list_del(&c->list);
  1056. mutex_unlock(&bch_register_lock);
  1057. pr_info("Cache set %pU unregistered", c->sb.set_uuid);
  1058. wake_up(&unregister_wait);
  1059. closure_debug_destroy(&c->cl);
  1060. kobject_put(&c->kobj);
  1061. }
  1062. static void cache_set_flush(struct closure *cl)
  1063. {
  1064. struct cache_set *c = container_of(cl, struct cache_set, caching);
  1065. struct cache *ca;
  1066. struct btree *b;
  1067. unsigned i;
  1068. if (!c)
  1069. closure_return(cl);
  1070. bch_cache_accounting_destroy(&c->accounting);
  1071. kobject_put(&c->internal);
  1072. kobject_del(&c->kobj);
  1073. if (c->gc_thread)
  1074. kthread_stop(c->gc_thread);
  1075. if (!IS_ERR_OR_NULL(c->root))
  1076. list_add(&c->root->list, &c->btree_cache);
  1077. /* Should skip this if we're unregistering because of an error */
  1078. list_for_each_entry(b, &c->btree_cache, list) {
  1079. mutex_lock(&b->write_lock);
  1080. if (btree_node_dirty(b))
  1081. __bch_btree_node_write(b, NULL);
  1082. mutex_unlock(&b->write_lock);
  1083. }
  1084. for_each_cache(ca, c, i)
  1085. if (ca->alloc_thread)
  1086. kthread_stop(ca->alloc_thread);
  1087. if (c->journal.cur) {
  1088. cancel_delayed_work_sync(&c->journal.work);
  1089. /* flush last journal entry if needed */
  1090. c->journal.work.work.func(&c->journal.work.work);
  1091. }
  1092. closure_return(cl);
  1093. }
  1094. static void __cache_set_unregister(struct closure *cl)
  1095. {
  1096. struct cache_set *c = container_of(cl, struct cache_set, caching);
  1097. struct cached_dev *dc;
  1098. size_t i;
  1099. mutex_lock(&bch_register_lock);
  1100. for (i = 0; i < c->nr_uuids; i++)
  1101. if (c->devices[i]) {
  1102. if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
  1103. test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
  1104. dc = container_of(c->devices[i],
  1105. struct cached_dev, disk);
  1106. bch_cached_dev_detach(dc);
  1107. } else {
  1108. bcache_device_stop(c->devices[i]);
  1109. }
  1110. }
  1111. mutex_unlock(&bch_register_lock);
  1112. continue_at(cl, cache_set_flush, system_wq);
  1113. }
  1114. void bch_cache_set_stop(struct cache_set *c)
  1115. {
  1116. if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
  1117. closure_queue(&c->caching);
  1118. }
  1119. void bch_cache_set_unregister(struct cache_set *c)
  1120. {
  1121. set_bit(CACHE_SET_UNREGISTERING, &c->flags);
  1122. bch_cache_set_stop(c);
  1123. }
  1124. #define alloc_bucket_pages(gfp, c) \
  1125. ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
  1126. struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
  1127. {
  1128. int iter_size;
  1129. struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
  1130. if (!c)
  1131. return NULL;
  1132. __module_get(THIS_MODULE);
  1133. closure_init(&c->cl, NULL);
  1134. set_closure_fn(&c->cl, cache_set_free, system_wq);
  1135. closure_init(&c->caching, &c->cl);
  1136. set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
  1137. /* Maybe create continue_at_noreturn() and use it here? */
  1138. closure_set_stopped(&c->cl);
  1139. closure_put(&c->cl);
  1140. kobject_init(&c->kobj, &bch_cache_set_ktype);
  1141. kobject_init(&c->internal, &bch_cache_set_internal_ktype);
  1142. bch_cache_accounting_init(&c->accounting, &c->cl);
  1143. memcpy(c->sb.set_uuid, sb->set_uuid, 16);
  1144. c->sb.block_size = sb->block_size;
  1145. c->sb.bucket_size = sb->bucket_size;
  1146. c->sb.nr_in_set = sb->nr_in_set;
  1147. c->sb.last_mount = sb->last_mount;
  1148. c->bucket_bits = ilog2(sb->bucket_size);
  1149. c->block_bits = ilog2(sb->block_size);
  1150. c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
  1151. c->btree_pages = bucket_pages(c);
  1152. if (c->btree_pages > BTREE_MAX_PAGES)
  1153. c->btree_pages = max_t(int, c->btree_pages / 4,
  1154. BTREE_MAX_PAGES);
  1155. sema_init(&c->sb_write_mutex, 1);
  1156. mutex_init(&c->bucket_lock);
  1157. init_waitqueue_head(&c->btree_cache_wait);
  1158. init_waitqueue_head(&c->bucket_wait);
  1159. init_waitqueue_head(&c->gc_wait);
  1160. sema_init(&c->uuid_write_mutex, 1);
  1161. spin_lock_init(&c->btree_gc_time.lock);
  1162. spin_lock_init(&c->btree_split_time.lock);
  1163. spin_lock_init(&c->btree_read_time.lock);
  1164. bch_moving_init_cache_set(c);
  1165. INIT_LIST_HEAD(&c->list);
  1166. INIT_LIST_HEAD(&c->cached_devs);
  1167. INIT_LIST_HEAD(&c->btree_cache);
  1168. INIT_LIST_HEAD(&c->btree_cache_freeable);
  1169. INIT_LIST_HEAD(&c->btree_cache_freed);
  1170. INIT_LIST_HEAD(&c->data_buckets);
  1171. c->search = mempool_create_slab_pool(32, bch_search_cache);
  1172. if (!c->search)
  1173. goto err;
  1174. iter_size = (sb->bucket_size / sb->block_size + 1) *
  1175. sizeof(struct btree_iter_set);
  1176. if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
  1177. !(c->bio_meta = mempool_create_kmalloc_pool(2,
  1178. sizeof(struct bbio) + sizeof(struct bio_vec) *
  1179. bucket_pages(c))) ||
  1180. !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
  1181. !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
  1182. !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
  1183. !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
  1184. WQ_MEM_RECLAIM, 0)) ||
  1185. bch_journal_alloc(c) ||
  1186. bch_btree_cache_alloc(c) ||
  1187. bch_open_buckets_alloc(c) ||
  1188. bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
  1189. goto err;
  1190. c->congested_read_threshold_us = 2000;
  1191. c->congested_write_threshold_us = 20000;
  1192. c->error_limit = 8 << IO_ERROR_SHIFT;
  1193. return c;
  1194. err:
  1195. bch_cache_set_unregister(c);
  1196. return NULL;
  1197. }
  1198. static void run_cache_set(struct cache_set *c)
  1199. {
  1200. const char *err = "cannot allocate memory";
  1201. struct cached_dev *dc, *t;
  1202. struct cache *ca;
  1203. struct closure cl;
  1204. unsigned i;
  1205. closure_init_stack(&cl);
  1206. for_each_cache(ca, c, i)
  1207. c->nbuckets += ca->sb.nbuckets;
  1208. set_gc_sectors(c);
  1209. if (CACHE_SYNC(&c->sb)) {
  1210. LIST_HEAD(journal);
  1211. struct bkey *k;
  1212. struct jset *j;
  1213. err = "cannot allocate memory for journal";
  1214. if (bch_journal_read(c, &journal))
  1215. goto err;
  1216. pr_debug("btree_journal_read() done");
  1217. err = "no journal entries found";
  1218. if (list_empty(&journal))
  1219. goto err;
  1220. j = &list_entry(journal.prev, struct journal_replay, list)->j;
  1221. err = "IO error reading priorities";
  1222. for_each_cache(ca, c, i)
  1223. prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
  1224. /*
  1225. * If prio_read() fails it'll call cache_set_error and we'll
  1226. * tear everything down right away, but if we perhaps checked
  1227. * sooner we could avoid journal replay.
  1228. */
  1229. k = &j->btree_root;
  1230. err = "bad btree root";
  1231. if (__bch_btree_ptr_invalid(c, k))
  1232. goto err;
  1233. err = "error reading btree root";
  1234. c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
  1235. if (IS_ERR_OR_NULL(c->root))
  1236. goto err;
  1237. list_del_init(&c->root->list);
  1238. rw_unlock(true, c->root);
  1239. err = uuid_read(c, j, &cl);
  1240. if (err)
  1241. goto err;
  1242. err = "error in recovery";
  1243. if (bch_btree_check(c))
  1244. goto err;
  1245. bch_journal_mark(c, &journal);
  1246. bch_initial_gc_finish(c);
  1247. pr_debug("btree_check() done");
  1248. /*
  1249. * bcache_journal_next() can't happen sooner, or
  1250. * btree_gc_finish() will give spurious errors about last_gc >
  1251. * gc_gen - this is a hack but oh well.
  1252. */
  1253. bch_journal_next(&c->journal);
  1254. err = "error starting allocator thread";
  1255. for_each_cache(ca, c, i)
  1256. if (bch_cache_allocator_start(ca))
  1257. goto err;
  1258. /*
  1259. * First place it's safe to allocate: btree_check() and
  1260. * btree_gc_finish() have to run before we have buckets to
  1261. * allocate, and bch_bucket_alloc_set() might cause a journal
  1262. * entry to be written so bcache_journal_next() has to be called
  1263. * first.
  1264. *
  1265. * If the uuids were in the old format we have to rewrite them
  1266. * before the next journal entry is written:
  1267. */
  1268. if (j->version < BCACHE_JSET_VERSION_UUID)
  1269. __uuid_write(c);
  1270. bch_journal_replay(c, &journal);
  1271. } else {
  1272. pr_notice("invalidating existing data");
  1273. for_each_cache(ca, c, i) {
  1274. unsigned j;
  1275. ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
  1276. 2, SB_JOURNAL_BUCKETS);
  1277. for (j = 0; j < ca->sb.keys; j++)
  1278. ca->sb.d[j] = ca->sb.first_bucket + j;
  1279. }
  1280. bch_initial_gc_finish(c);
  1281. err = "error starting allocator thread";
  1282. for_each_cache(ca, c, i)
  1283. if (bch_cache_allocator_start(ca))
  1284. goto err;
  1285. mutex_lock(&c->bucket_lock);
  1286. for_each_cache(ca, c, i)
  1287. bch_prio_write(ca);
  1288. mutex_unlock(&c->bucket_lock);
  1289. err = "cannot allocate new UUID bucket";
  1290. if (__uuid_write(c))
  1291. goto err;
  1292. err = "cannot allocate new btree root";
  1293. c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
  1294. if (IS_ERR_OR_NULL(c->root))
  1295. goto err;
  1296. mutex_lock(&c->root->write_lock);
  1297. bkey_copy_key(&c->root->key, &MAX_KEY);
  1298. bch_btree_node_write(c->root, &cl);
  1299. mutex_unlock(&c->root->write_lock);
  1300. bch_btree_set_root(c->root);
  1301. rw_unlock(true, c->root);
  1302. /*
  1303. * We don't want to write the first journal entry until
  1304. * everything is set up - fortunately journal entries won't be
  1305. * written until the SET_CACHE_SYNC() here:
  1306. */
  1307. SET_CACHE_SYNC(&c->sb, true);
  1308. bch_journal_next(&c->journal);
  1309. bch_journal_meta(c, &cl);
  1310. }
  1311. err = "error starting gc thread";
  1312. if (bch_gc_thread_start(c))
  1313. goto err;
  1314. closure_sync(&cl);
  1315. c->sb.last_mount = get_seconds();
  1316. bcache_write_super(c);
  1317. list_for_each_entry_safe(dc, t, &uncached_devices, list)
  1318. bch_cached_dev_attach(dc, c);
  1319. flash_devs_run(c);
  1320. set_bit(CACHE_SET_RUNNING, &c->flags);
  1321. return;
  1322. err:
  1323. closure_sync(&cl);
  1324. /* XXX: test this, it's broken */
  1325. bch_cache_set_error(c, "%s", err);
  1326. }
  1327. static bool can_attach_cache(struct cache *ca, struct cache_set *c)
  1328. {
  1329. return ca->sb.block_size == c->sb.block_size &&
  1330. ca->sb.bucket_size == c->sb.bucket_size &&
  1331. ca->sb.nr_in_set == c->sb.nr_in_set;
  1332. }
static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version = ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags = ca->sb.flags;
		c->sb.seq = ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}

/* Cache device */

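/*
 * Release path for the cache kobject initialized in cache_alloc(): undoes
 * cache_alloc() and register_cache(), dropping the superblock page
 * reference and the exclusive claim on the underlying block device last.
 */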
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}

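/*
 * Allocate the in-memory structures for a cache device. The free lists are
 * sized relative to the bucket count: "free" below is
 * roundup_pow_of_two(nbuckets) >> 10, so for example a cache with one
 * million buckets gets 1024-entry movinggc/none reserves, a 4096-entry
 * free_inc fifo and an 8192-entry heap.
 */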
static int cache_alloc(struct cache *ca)
{
	size_t free;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);

	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
	    !(ca->buckets = vzalloc(sizeof(struct bucket) *
				    ca->sb.nbuckets)) ||
	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					 2, GFP_KERNEL)) ||
	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	return 0;
}

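/*
 * Take ownership of an opened cache device: copy in the superblock, pin the
 * superblock page for later rewrites, honour the discard flag if the queue
 * supports it, then hand the cache to register_cache_set().
 */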
static int register_cache(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", bdevname(bdev, name));

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error opening %s: %s", bdevname(bdev, name), err);

	return ret;
}

/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register,		register_bcache);
kobj_attribute_write(register_quiet,	register_bcache);

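/*
 * Devices are registered from userspace by writing a device path to these
 * sysfs files, for example:
 *
 *	echo /dev/sdc > /sys/fs/bcache/register
 *
 * (/dev/sdc is just a placeholder.) register_quiet behaves the same but
 * quietly ignores a device that is busy or already registered, so callers
 * that may race with each other do not see spurious errors.
 */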
static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;
	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;
	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;
	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}

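/*
 * Sysfs store handler behind both register files. The bch_is_open_*()
 * helpers above exist so that an -EBUSY from blkdev_get_by_path() can be
 * reported as "device already registered" rather than the less helpful
 * "device busy" when bcache itself is the one holding the device open.
 */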
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (attr == &ksysfs_register_quiet)
				goto out;
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
		if (!dc) {
			err = "cannot allocate memory";
			goto err_close;
		}

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca) {
			err = "cannot allocate memory";
			goto err_close;
		}

		if (register_cache(sb, sb_page, bdev, ca) != 0) {
			err = "failed to register cache device";
			goto err_close;
		}
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	pr_info("error opening %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}

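/*
 * On shutdown, halt or power-off, stop every cache set and backing device
 * so they shut down cleanly before the block layer goes away, waiting up to
 * two seconds (2 * HZ) for the asynchronous stops to finish.
 */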
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};

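/*
 * Module teardown: undo bcache_init() in roughly reverse order. Each step
 * checks whether its resource was actually set up, so this is also safe to
 * call from bcache_init()'s error path.
 */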
static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
}

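/*
 * Module init: register the reboot notifier and the bcache block major,
 * then create the register files under /sys/fs/bcache and the shared
 * workqueue; any failure after register_blkdev() funnels through
 * bcache_exit() for cleanup.
 */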
static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		return bcache_major;
	}

	if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    sysfs_create_files(bcache_kobj, files) ||
	    bch_request_init() ||
	    bch_debug_init(bcache_kobj))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);