super.c

/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch_cache_modes[] = {
	"default",
	"writethrough",
	"writeback",
	"writearound",
	"none",
	NULL
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_minor);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;

#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
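
/*
 * Reader's note (not in the original source, summarising the arithmetic):
 * BTREE_MAX_PAGES is enough pages to hold 256KB - 64 pages with a 4KB
 * PAGE_SIZE. It is only used below in bch_cache_set_alloc() when clamping
 * c->btree_pages, the per-node btree buffer size in pages.
 */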
/* Superblock */

static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct page **res)
{
	const char *err;
	struct cache_sb *s;
	struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
	unsigned i;

	if (!bh)
		return "IO error";

	s = (struct cache_sb *) bh->b_data;

	sb->offset = le64_to_cpu(s->offset);
	sb->version = le64_to_cpu(s->version);

	memcpy(sb->magic, s->magic, 16);
	memcpy(sb->uuid, s->uuid, 16);
	memcpy(sb->set_uuid, s->set_uuid, 16);
	memcpy(sb->label, s->label, SB_LABEL_SIZE);

	sb->flags = le64_to_cpu(s->flags);
	sb->seq = le64_to_cpu(s->seq);
	sb->last_mount = le32_to_cpu(s->last_mount);
	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->keys = le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock";
	if (sb->offset != SB_SECTOR)
		goto err;

	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size = le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset = BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
		sb->data_offset = le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		sb->nbuckets = le64_to_cpu(s->nbuckets);
		sb->bucket_size = le16_to_cpu(s->bucket_size);

		sb->nr_in_set = le16_to_cpu(s->nr_in_set);
		sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);

		err = "Too many buckets";
		if (sb->nbuckets > LONG_MAX)
			goto err;

		err = "Not enough buckets";
		if (sb->nbuckets < 1 << 7)
			goto err;

		err = "Bad block/bucket size";
		if (!is_power_of_2(sb->block_size) ||
		    sb->block_size > PAGE_SECTORS ||
		    !is_power_of_2(sb->bucket_size) ||
		    sb->bucket_size < PAGE_SECTORS)
			goto err;

		err = "Invalid superblock: device too small";
		if (get_capacity(bdev->bd_disk) < sb->bucket_size * sb->nbuckets)
			goto err;

		err = "Bad UUID";
		if (bch_is_zero(sb->set_uuid, 16))
			goto err;

		err = "Bad cache device number in set";
		if (!sb->nr_in_set ||
		    sb->nr_in_set <= sb->nr_this_dev ||
		    sb->nr_in_set > MAX_CACHES_PER_SET)
			goto err;

		err = "Journal buckets not sequential";
		for (i = 0; i < sb->keys; i++)
			if (sb->d[i] != sb->first_bucket + i)
				goto err;

		err = "Too many journal buckets";
		if (sb->first_bucket + sb->keys > sb->nbuckets)
			goto err;

		err = "Invalid superblock: first bucket comes before end of super";
		if (sb->first_bucket * sb->bucket_size < 16)
			goto err;

		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = get_seconds();
	err = NULL;

	get_page(bh->b_page);
	*res = bh->b_page;
err:
	put_bh(bh);
	return err;
}
static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;
	/* XXX: error checking */

	closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct bio *bio)
{
	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
	unsigned i;

	bio->bi_iter.bi_sector = SB_SECTOR;
	bio->bi_iter.bi_size = SB_SIZE;
	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
	bch_bio_map(bio, NULL);

	out->offset = cpu_to_le64(sb->offset);
	out->version = cpu_to_le64(sb->version);

	memcpy(out->uuid, sb->uuid, 16);
	memcpy(out->set_uuid, sb->set_uuid, 16);
	memcpy(out->label, sb->label, SB_LABEL_SIZE);

	out->flags = cpu_to_le64(sb->flags);
	out->seq = cpu_to_le64(sb->seq);

	out->last_mount = cpu_to_le32(sb->last_mount);
	out->first_bucket = cpu_to_le16(sb->first_bucket);
	out->keys = cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_reset(bio);
	bio->bi_bdev = dc->bdev;
	bio->bi_end_io = write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	__write_super(&dc->sb, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	bch_count_io_errors(ca, bio->bi_error, "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca;
	unsigned i;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	c->sb.seq++;

	for_each_cache(ca, c, i) {
		struct bio *bio = &ca->sb_bio;

		ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
		ca->sb.seq = c->sb.seq;
		ca->sb.last_mount = c->sb.last_mount;

		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));

		bio_reset(bio);
		bio->bi_bdev = ca->bdev;
		bio->bi_end_io = write_super_endio;
		bio->bi_private = ca;

		closure_get(cl);
		__write_super(&ca->sb, bio);
	}

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}
/* UUID io */

static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_error, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
		    struct bkey *k, struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io = uuid_endio;
		bio->bi_private = cl;
		bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if (op != REQ_OP_WRITE)
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, REQ_OP_READ, READ_SYNC, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0 *u0 = (void *) c->uuids;
		struct uuid_entry *u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid, u0[i].uuid, 16);
			memcpy(u1[i].label, u0[i].label, 32);

			u1[i].first_reg = u0[i].first_reg;
			u1[i].last_reg = u0[i].last_reg;
			u1[i].invalidated = u0[i].invalidated;

			u1[i].flags = 0;
			u1[i].sectors = 0;
		}
	}

	return NULL;
}

static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	closure_init_stack(&cl);

	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
		return 1;

	SET_KEY_SIZE(&k.key, c->sb.bucket_size);
	uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
	closure_sync(&cl);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";

	return uuid_find(c, zero_uuid);
}
/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other,
 * and it's actually the gens that must be written out at specific times - it's
 * no big deal if the priorities don't get written, if we lose them we just
 * reuse buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, in as many buckets as are required
 * to fit them all. The buckets we use to store them form a list; the journal
 * header points to the first bucket, the first bucket points to the second
 * bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */
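
/*
 * For illustration (a summary, not part of the original comment): each prio
 * bucket written by bch_prio_write() below begins with a struct prio_set
 * header (csum, magic, seq, next_bucket, ...) followed by prios_per_bucket(ca)
 * packed struct bucket_disk entries - the 16 bit prio plus 8 bit gen described
 * above, roughly 3 bytes per bucket. next_bucket chains to the following prio
 * bucket, which is how prio_read() walks the whole list starting from the
 * bucket recorded in the journal header. See bcache.h for the exact layouts.
 */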
static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_error, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
		    unsigned long op_flags)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
	bio->bi_bdev = ca->bdev;
	bio->bi_iter.bi_size = bucket_bytes(ca);

	bio->bi_end_io = prio_endio;
	bio->bi_private = ca;
	bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(bio, &ca->prio);
	closure_sync(cl);
}

void bch_prio_write(struct cache *ca)
{
	int i;
	struct bucket *b;
	struct closure cl;

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	//pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
	//	 fifo_used(&ca->free_inc), fifo_used(&ca->unused));

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket = ca->prio_buckets[i + 1];
		p->magic = pset_magic(&ca->sb);
		p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_OP_WRITE, 0);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
}

static void prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned bucket_nr = 0;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, REQ_OP_READ, READ_SYNC);

			if (p->csum != bch_crc64(&p->magic, bucket_bytes(ca) - 8))
				pr_warn("bad csum reading priorities");

			if (p->magic != pset_magic(&ca->sb))
				pr_warn("bad magic reading priorities");

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}
}
/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
	struct bcache_device *d = b->bd_disk->private_data;

	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
	struct bcache_device *d = b->private_data;

	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;

	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_ops = {
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		unsigned i;
		struct cache *ca;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		for_each_cache(ca, d->c, i)
			bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	unsigned i;
	struct cache *ca;

	for_each_cache(ca, d->c, i)
		bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
	     sysfs_create_link(&c->kobj, &d->kobj, d->name),
	     "Couldn't create device <-> cache set symlinks");

	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned id)
{
	d->id = id;
	d->c = c;
	c->devices[id] = d;

	closure_get(&c->caching);
}

static void bcache_device_free(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	pr_info("%s stopped", d->disk->disk_name);

	if (d->c)
		bcache_device_detach(d);
	if (d->disk && d->disk->flags & GENHD_FL_UP)
		del_gendisk(d->disk);
	if (d->disk && d->disk->queue)
		blk_cleanup_queue(d->disk->queue);
	if (d->disk) {
		ida_simple_remove(&bcache_minor, d->disk->first_minor);
		put_disk(d->disk);
	}

	if (d->bio_split)
		bioset_free(d->bio_split);
	kvfree(d->full_dirty_stripes);
	kvfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

static int bcache_device_init(struct bcache_device *d, unsigned block_size,
			      sector_t sectors)
{
	struct request_queue *q;
	size_t n;
	int minor;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;

	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);

	if (!d->nr_stripes ||
	    d->nr_stripes > INT_MAX ||
	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
		       (unsigned)d->nr_stripes);
		return -ENOMEM;
	}

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = n < PAGE_SIZE << 6
		? kzalloc(n, GFP_KERNEL)
		: vzalloc(n);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = n < PAGE_SIZE << 6
		? kzalloc(n, GFP_KERNEL)
		: vzalloc(n);
	if (!d->full_dirty_stripes)
		return -ENOMEM;

	minor = ida_simple_get(&bcache_minor, 0, MINORMASK + 1, GFP_KERNEL);
	if (minor < 0)
		return minor;

	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(d->disk = alloc_disk(1))) {
		ida_simple_remove(&bcache_minor, minor);
		return -ENOMEM;
	}

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", minor);

	d->disk->major = bcache_major;
	d->disk->first_minor = minor;
	d->disk->fops = &bcache_ops;
	d->disk->private_data = d;

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		return -ENOMEM;

	blk_queue_make_request(q, NULL);
	d->disk->queue = q;
	q->queuedata = d;
	q->backing_dev_info.congested_data = d;
	q->limits.max_hw_sectors = UINT_MAX;
	q->limits.max_sectors = UINT_MAX;
	q->limits.max_segment_size = UINT_MAX;
	q->limits.max_segments = BIO_MAX_PAGES;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	q->limits.discard_granularity = 512;
	q->limits.io_min = block_size;
	q->limits.logical_block_size = block_size;
	q->limits.physical_block_size = block_size;
	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
	clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags);
	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);

	blk_queue_write_cache(q, true, true);

	return 0;
}
/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

void bch_cached_dev_run(struct cached_dev *dc)
{
	struct bcache_device *d = &dc->disk;
	char buf[SB_LABEL_SIZE + 1];
	char *env[] = {
		"DRIVER=bcache",
		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
		NULL,
		NULL,
	};

	memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
	buf[SB_LABEL_SIZE] = '\0';
	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);

	if (atomic_xchg(&dc->running, 1)) {
		kfree(env[1]);
		kfree(env[2]);
		return;
	}

	if (!d->c &&
	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
		struct closure cl;
		closure_init_stack(&cl);

		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	}

	add_disk(d->disk);
	bd_link_disk_holder(dc->bdev, dc->disk.disk);
	/* won't show up in the uevent file, use udevadm monitor -e instead
	 * only class / kset properties are persistent */
	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
	kfree(env[1]);
	kfree(env[2]);

	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
		pr_debug("error creating sysfs link");
}

static void cached_dev_detach_finish(struct work_struct *w)
{
	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
	char buf[BDEVNAME_SIZE];
	struct closure cl;
	closure_init_stack(&cl);

	BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
	BUG_ON(atomic_read(&dc->count));

	mutex_lock(&bch_register_lock);

	memset(&dc->sb.set_uuid, 0, 16);
	SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

	bch_write_bdev_super(dc, &cl);
	closure_sync(&cl);

	bcache_device_detach(&dc->disk);
	list_move(&dc->list, &uncached_devices);

	clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
	clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

	mutex_unlock(&bch_register_lock);

	pr_info("Caching disabled for %s", bdevname(dc->bdev, buf));

	/* Drop ref we took in cached_dev_detach() */
	closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
	lockdep_assert_held(&bch_register_lock);

	if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
		return;

	if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return;

	/*
	 * Block the device from being closed and freed until we're finished
	 * detaching
	 */
	closure_get(&dc->disk.cl);

	bch_writeback_queue(dc);
	cached_dev_put(dc);
}

int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
{
	uint32_t rtime = cpu_to_le32(get_seconds());
	struct uuid_entry *u;
	char buf[BDEVNAME_SIZE];

	bdevname(dc->bdev, buf);

	if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
		return -ENOENT;

	if (dc->disk.c) {
		pr_err("Can't attach %s: already attached", buf);
		return -EINVAL;
	}

	if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
		pr_err("Can't attach %s: shutting down", buf);
		return -EINVAL;
	}

	if (dc->sb.block_size < c->sb.block_size) {
		/* Will die */
		pr_err("Couldn't attach %s: block size less than set's block size",
		       buf);
		return -EINVAL;
	}

	u = uuid_find(c, dc->sb.uuid);

	if (u &&
	    (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
	     BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32(get_seconds());
		u = NULL;
	}

	if (!u) {
		if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
			pr_err("Couldn't find uuid for %s in set", buf);
			return -ENOENT;
		}

		u = uuid_find_empty(c);
		if (!u) {
			pr_err("Not caching %s, no room for UUID", buf);
			return -EINVAL;
		}
	}

	/* Deadlocks since we're called via sysfs...
	sysfs_remove_file(&dc->kobj, &sysfs_attach);
	*/

	if (bch_is_zero(u->uuid, 16)) {
		struct closure cl;
		closure_init_stack(&cl);

		memcpy(u->uuid, dc->sb.uuid, 16);
		memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
		u->first_reg = u->last_reg = rtime;
		bch_uuid_write(c);

		memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
		SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

		bch_write_bdev_super(dc, &cl);
		closure_sync(&cl);
	} else {
		u->last_reg = rtime;
		bch_uuid_write(c);
	}

	bcache_device_attach(&dc->disk, c, u - c->uuids);
	list_move(&dc->list, &c->cached_devs);
	calc_cached_dev_sectors(c);

	smp_wmb();
	/*
	 * dc->c must be set before dc->count != 0 - paired with the mb in
	 * cached_dev_get()
	 */
	atomic_set(&dc->count, 1);

	/* Block writeback thread, but spawn it */
	down_write(&dc->writeback_lock);
	if (bch_cached_dev_writeback_start(dc)) {
		up_write(&dc->writeback_lock);
		return -ENOMEM;
	}

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
		bch_sectors_dirty_init(dc);
		atomic_set(&dc->has_dirty, 1);
		atomic_inc(&dc->count);
		bch_writeback_queue(dc);
	}

	bch_cached_dev_run(dc);
	bcache_device_link(&dc->disk, c, "bdev");

	/* Allow the writeback thread to proceed */
	up_write(&dc->writeback_lock);

	pr_info("Caching %s as %s on set %pU",
		bdevname(dc->bdev, buf), dc->disk.disk->disk_name,
		dc->disk.c->sb.set_uuid);
	return 0;
}

void bch_cached_dev_release(struct kobject *kobj)
{
	struct cached_dev *dc = container_of(kobj, struct cached_dev,
					     disk.kobj);
	kfree(dc);
	module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

	cancel_delayed_work_sync(&dc->writeback_rate_update);
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		kthread_stop(dc->writeback_thread);

	mutex_lock(&bch_register_lock);

	if (atomic_read(&dc->running))
		bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
	bcache_device_free(&dc->disk);
	list_del(&dc->list);

	mutex_unlock(&bch_register_lock);

	if (!IS_ERR_OR_NULL(dc->bdev))
		blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	wake_up(&unregister_wait);

	kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
	struct bcache_device *d = &dc->disk;

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);

	bch_cache_accounting_destroy(&dc->accounting);
	kobject_del(&d->kobj);

	continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned block_size)
{
	int ret;
	struct io *io;
	struct request_queue *q = bdev_get_queue(dc->bdev);

	__module_get(THIS_MODULE);
	INIT_LIST_HEAD(&dc->list);
	closure_init(&dc->disk.cl, NULL);
	set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
	kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
	INIT_WORK(&dc->detach, cached_dev_detach_finish);
	sema_init(&dc->sb_write_mutex, 1);
	INIT_LIST_HEAD(&dc->io_lru);
	spin_lock_init(&dc->io_lock);
	bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

	dc->sequential_cutoff = 4 << 20;

	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
		list_add(&io->lru, &dc->io_lru);
		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
	}

	dc->disk.stripe_size = q->limits.io_opt >> 9;

	if (dc->disk.stripe_size)
		dc->partial_stripes_expensive =
			q->limits.raid_partial_stripes_expensive;

	ret = bcache_device_init(&dc->disk, block_size,
			dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
	if (ret)
		return ret;

	set_capacity(dc->disk.disk,
		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);

	dc->disk.disk->queue->backing_dev_info.ra_pages =
		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
		    q->backing_dev_info.ra_pages);

	bch_cached_dev_request_init(dc);
	bch_cached_dev_writeback_init(dc);
	return 0;
}

/* Cached device - bcache superblock */

static void register_bdev(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev,
			  struct cached_dev *dc)
{
	char name[BDEVNAME_SIZE];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	memcpy(&dc->sb, sb, sizeof(struct cache_sb));
	dc->bdev = bdev;
	dc->bdev->bd_holder = dc;

	bio_init(&dc->sb_bio);
	dc->sb_bio.bi_max_vecs = 1;
	dc->sb_bio.bi_io_vec = dc->sb_bio.bi_inline_vecs;
	dc->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (cached_dev_init(dc, sb->block_size << 9))
		goto err;

	err = "error creating kobject";
	if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
			"bcache"))
		goto err;
	if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
		goto err;

	pr_info("registered backing device %s", bdevname(bdev, name));

	list_add(&dc->list, &uncached_devices);
	list_for_each_entry(c, &bch_cache_sets, list)
		bch_cached_dev_attach(dc, c);

	if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
	    BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
		bch_cached_dev_run(dc);

	return;
err:
	pr_notice("error opening %s: %s", bdevname(bdev, name), err);
	bcache_device_stop(&dc->disk);
}
/* Flash only volumes */

void bch_flash_dev_release(struct kobject *kobj)
{
	struct bcache_device *d = container_of(kobj, struct bcache_device,
					       kobj);
	kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);
	mutex_lock(&bch_register_lock);
	bcache_device_free(d);
	mutex_unlock(&bch_register_lock);
	kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
	struct bcache_device *d = container_of(cl, struct bcache_device, cl);

	mutex_lock(&bch_register_lock);
	bcache_device_unlink(d);
	mutex_unlock(&bch_register_lock);
	kobject_del(&d->kobj);
	continue_at(cl, flash_dev_free, system_wq);
}

static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
	struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
					  GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	closure_init(&d->cl, NULL);
	set_closure_fn(&d->cl, flash_dev_flush, system_wq);

	kobject_init(&d->kobj, &bch_flash_dev_ktype);

	if (bcache_device_init(d, block_bytes(c), u->sectors))
		goto err;

	bcache_device_attach(d, c, u - c->uuids);
	bch_flash_dev_request_init(d);
	add_disk(d->disk);

	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
		goto err;

	bcache_device_link(d, c, "volume");

	return 0;
err:
	kobject_put(&d->kobj);
	return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
	int ret = 0;
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids && !ret;
	     u++)
		if (UUID_FLASH_ONLY(u))
			ret = flash_dev_run(c, u);

	return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
	struct uuid_entry *u;

	if (test_bit(CACHE_SET_STOPPING, &c->flags))
		return -EINTR;

	if (!test_bit(CACHE_SET_RUNNING, &c->flags))
		return -EPERM;

	u = uuid_find_empty(c);
	if (!u) {
		pr_err("Can't create volume, no room for UUID");
		return -EINVAL;
	}

	get_random_bytes(u->uuid, 16);
	memset(u->label, 0, 32);
	u->first_reg = u->last_reg = cpu_to_le32(get_seconds());

	SET_UUID_FLASH_ONLY(u, 1);
	u->sectors = size >> 9;

	bch_uuid_write(c);

	return flash_dev_run(c, u);
}
/* Cache set */

__printf(2, 3)
bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
{
	va_list args;

	if (c->on_error != ON_ERROR_PANIC &&
	    test_bit(CACHE_SET_STOPPING, &c->flags))
		return false;

	/* XXX: we can be called from atomic context
	acquire_console_sem();
	*/

	printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);

	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	printk(", disabling caching\n");

	if (c->on_error == ON_ERROR_PANIC)
		panic("panic forced after error\n");

	bch_cache_set_unregister(c);
	return true;
}

void bch_cache_set_release(struct kobject *kobj)
{
	struct cache_set *c = container_of(kobj, struct cache_set, kobj);
	kfree(c);
	module_put(THIS_MODULE);
}

static void cache_set_free(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, cl);
	struct cache *ca;
	unsigned i;

	if (!IS_ERR_OR_NULL(c->debug))
		debugfs_remove(c->debug);

	bch_open_buckets_free(c);
	bch_btree_cache_free(c);
	bch_journal_free(c);

	for_each_cache(ca, c, i)
		if (ca) {
			ca->set = NULL;
			c->cache[ca->sb.nr_this_dev] = NULL;
			kobject_put(&ca->kobj);
		}

	bch_bset_sort_state_free(&c->sort);
	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));

	if (c->moving_gc_wq)
		destroy_workqueue(c->moving_gc_wq);
	if (c->bio_split)
		bioset_free(c->bio_split);
	if (c->fill_iter)
		mempool_destroy(c->fill_iter);
	if (c->bio_meta)
		mempool_destroy(c->bio_meta);
	if (c->search)
		mempool_destroy(c->search);
	kfree(c->devices);

	mutex_lock(&bch_register_lock);
	list_del(&c->list);
	mutex_unlock(&bch_register_lock);

	pr_info("Cache set %pU unregistered", c->sb.set_uuid);
	wake_up(&unregister_wait);

	closure_debug_destroy(&c->cl);
	kobject_put(&c->kobj);
}

static void cache_set_flush(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cache *ca;
	struct btree *b;
	unsigned i;

	if (!c)
		closure_return(cl);

	bch_cache_accounting_destroy(&c->accounting);

	kobject_put(&c->internal);
	kobject_del(&c->kobj);

	if (c->gc_thread)
		kthread_stop(c->gc_thread);

	if (!IS_ERR_OR_NULL(c->root))
		list_add(&c->root->list, &c->btree_cache);

	/* Should skip this if we're unregistering because of an error */
	list_for_each_entry(b, &c->btree_cache, list) {
		mutex_lock(&b->write_lock);
		if (btree_node_dirty(b))
			__bch_btree_node_write(b, NULL);
		mutex_unlock(&b->write_lock);
	}

	for_each_cache(ca, c, i)
		if (ca->alloc_thread)
			kthread_stop(ca->alloc_thread);

	if (c->journal.cur) {
		cancel_delayed_work_sync(&c->journal.work);
		/* flush last journal entry if needed */
		c->journal.work.work.func(&c->journal.work.work);
	}

	closure_return(cl);
}

static void __cache_set_unregister(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, caching);
	struct cached_dev *dc;
	size_t i;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++)
		if (c->devices[i]) {
			if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
			    test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
				dc = container_of(c->devices[i],
						  struct cached_dev, disk);
				bch_cached_dev_detach(dc);
			} else {
				bcache_device_stop(c->devices[i]);
			}
		}

	mutex_unlock(&bch_register_lock);

	continue_at(cl, cache_set_flush, system_wq);
}

void bch_cache_set_stop(struct cache_set *c)
{
	if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
		closure_queue(&c->caching);
}

void bch_cache_set_unregister(struct cache_set *c)
{
	set_bit(CACHE_SET_UNREGISTERING, &c->flags);
	bch_cache_set_stop(c);
}

#define alloc_bucket_pages(gfp, c)					\
	((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
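
/*
 * Descriptive note (added for readability, not in the original source):
 * alloc_bucket_pages() hands back one bucket's worth of zeroed pages; bucket
 * sizes are validated as powers of two in read_super(), so the ilog2() order
 * is exact. It is used below for c->uuids and ca->disk_buckets.
 */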
struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
{
	int iter_size;
	struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
	if (!c)
		return NULL;

	__module_get(THIS_MODULE);
	closure_init(&c->cl, NULL);
	set_closure_fn(&c->cl, cache_set_free, system_wq);

	closure_init(&c->caching, &c->cl);
	set_closure_fn(&c->caching, __cache_set_unregister, system_wq);

	/* Maybe create continue_at_noreturn() and use it here? */
	closure_set_stopped(&c->cl);
	closure_put(&c->cl);

	kobject_init(&c->kobj, &bch_cache_set_ktype);
	kobject_init(&c->internal, &bch_cache_set_internal_ktype);

	bch_cache_accounting_init(&c->accounting, &c->cl);

	memcpy(c->sb.set_uuid, sb->set_uuid, 16);
	c->sb.block_size = sb->block_size;
	c->sb.bucket_size = sb->bucket_size;
	c->sb.nr_in_set = sb->nr_in_set;
	c->sb.last_mount = sb->last_mount;
	c->bucket_bits = ilog2(sb->bucket_size);
	c->block_bits = ilog2(sb->block_size);
	c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);

	c->btree_pages = bucket_pages(c);
	if (c->btree_pages > BTREE_MAX_PAGES)
		c->btree_pages = max_t(int, c->btree_pages / 4,
				       BTREE_MAX_PAGES);

	sema_init(&c->sb_write_mutex, 1);
	mutex_init(&c->bucket_lock);
	init_waitqueue_head(&c->btree_cache_wait);
	init_waitqueue_head(&c->bucket_wait);
	sema_init(&c->uuid_write_mutex, 1);

	spin_lock_init(&c->btree_gc_time.lock);
	spin_lock_init(&c->btree_split_time.lock);
	spin_lock_init(&c->btree_read_time.lock);

	bch_moving_init_cache_set(c);

	INIT_LIST_HEAD(&c->list);
	INIT_LIST_HEAD(&c->cached_devs);
	INIT_LIST_HEAD(&c->btree_cache);
	INIT_LIST_HEAD(&c->btree_cache_freeable);
	INIT_LIST_HEAD(&c->btree_cache_freed);
	INIT_LIST_HEAD(&c->data_buckets);

	c->search = mempool_create_slab_pool(32, bch_search_cache);
	if (!c->search)
		goto err;

	iter_size = (sb->bucket_size / sb->block_size + 1) *
		sizeof(struct btree_iter_set);

	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
				sizeof(struct bbio) + sizeof(struct bio_vec) *
				bucket_pages(c))) ||
	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
	    !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
						WQ_MEM_RECLAIM, 0)) ||
	    bch_journal_alloc(c) ||
	    bch_btree_cache_alloc(c) ||
	    bch_open_buckets_alloc(c) ||
	    bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
		goto err;

	c->congested_read_threshold_us = 2000;
	c->congested_write_threshold_us = 20000;
	c->error_limit = 8 << IO_ERROR_SHIFT;

	return c;
err:
	bch_cache_set_unregister(c);
	return NULL;
}
static void run_cache_set(struct cache_set *c)
{
	const char *err = "cannot allocate memory";
	struct cached_dev *dc, *t;
	struct cache *ca;
	struct closure cl;
	unsigned i;

	closure_init_stack(&cl);

	for_each_cache(ca, c, i)
		c->nbuckets += ca->sb.nbuckets;

	if (CACHE_SYNC(&c->sb)) {
		LIST_HEAD(journal);
		struct bkey *k;
		struct jset *j;

		err = "cannot allocate memory for journal";
		if (bch_journal_read(c, &journal))
			goto err;

		pr_debug("btree_journal_read() done");

		err = "no journal entries found";
		if (list_empty(&journal))
			goto err;

		j = &list_entry(journal.prev, struct journal_replay, list)->j;

		err = "IO error reading priorities";
		for_each_cache(ca, c, i)
			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);

		/*
		 * If prio_read() fails it'll call cache_set_error and we'll
		 * tear everything down right away, but if we perhaps checked
		 * sooner we could avoid journal replay.
		 */

		k = &j->btree_root;

		err = "bad btree root";
		if (__bch_btree_ptr_invalid(c, k))
			goto err;

		err = "error reading btree root";
		c->root = bch_btree_node_get(c, NULL, k, j->btree_level, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		list_del_init(&c->root->list);
		rw_unlock(true, c->root);

		err = uuid_read(c, j, &cl);
		if (err)
			goto err;

		err = "error in recovery";
		if (bch_btree_check(c))
			goto err;

		bch_journal_mark(c, &journal);
		bch_initial_gc_finish(c);
		pr_debug("btree_check() done");

		/*
		 * bcache_journal_next() can't happen sooner, or
		 * btree_gc_finish() will give spurious errors about last_gc >
		 * gc_gen - this is a hack but oh well.
		 */
		bch_journal_next(&c->journal);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		/*
		 * First place it's safe to allocate: btree_check() and
		 * btree_gc_finish() have to run before we have buckets to
		 * allocate, and bch_bucket_alloc_set() might cause a journal
		 * entry to be written so bcache_journal_next() has to be called
		 * first.
		 *
		 * If the uuids were in the old format we have to rewrite them
		 * before the next journal entry is written:
		 */
		if (j->version < BCACHE_JSET_VERSION_UUID)
			__uuid_write(c);

		bch_journal_replay(c, &journal);
	} else {
		pr_notice("invalidating existing data");

		for_each_cache(ca, c, i) {
			unsigned j;

			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
					      2, SB_JOURNAL_BUCKETS);

			for (j = 0; j < ca->sb.keys; j++)
				ca->sb.d[j] = ca->sb.first_bucket + j;
		}

		bch_initial_gc_finish(c);

		err = "error starting allocator thread";
		for_each_cache(ca, c, i)
			if (bch_cache_allocator_start(ca))
				goto err;

		mutex_lock(&c->bucket_lock);
		for_each_cache(ca, c, i)
			bch_prio_write(ca);
		mutex_unlock(&c->bucket_lock);

		err = "cannot allocate new UUID bucket";
		if (__uuid_write(c))
			goto err;

		err = "cannot allocate new btree root";
		c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
		if (IS_ERR_OR_NULL(c->root))
			goto err;

		mutex_lock(&c->root->write_lock);
		bkey_copy_key(&c->root->key, &MAX_KEY);
		bch_btree_node_write(c->root, &cl);
		mutex_unlock(&c->root->write_lock);

		bch_btree_set_root(c->root);
		rw_unlock(true, c->root);

		/*
		 * We don't want to write the first journal entry until
		 * everything is set up - fortunately journal entries won't be
		 * written until the SET_CACHE_SYNC() here:
		 */
		SET_CACHE_SYNC(&c->sb, true);

		bch_journal_next(&c->journal);
		bch_journal_meta(c, &cl);
	}

	err = "error starting gc thread";
	if (bch_gc_thread_start(c))
		goto err;

	closure_sync(&cl);
	c->sb.last_mount = get_seconds();
	bcache_write_super(c);

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		bch_cached_dev_attach(dc, c);

	flash_devs_run(c);

	set_bit(CACHE_SET_RUNNING, &c->flags);
	return;
err:
	closure_sync(&cl);
	/* XXX: test this, it's broken */
	bch_cache_set_error(c, "%s", err);
}
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
	return ca->sb.block_size == c->sb.block_size &&
		ca->sb.bucket_size == c->sb.bucket_size &&
		ca->sb.nr_in_set == c->sb.nr_in_set;
}

static const char *register_cache_set(struct cache *ca)
{
	char buf[12];
	const char *err = "cannot allocate memory";
	struct cache_set *c;

	list_for_each_entry(c, &bch_cache_sets, list)
		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
			if (c->cache[ca->sb.nr_this_dev])
				return "duplicate cache set member";

			if (!can_attach_cache(ca, c))
				return "cache sb does not match set";

			if (!CACHE_SYNC(&ca->sb))
				SET_CACHE_SYNC(&c->sb, false);

			goto found;
		}

	c = bch_cache_set_alloc(&ca->sb);
	if (!c)
		return err;

	err = "error creating kobject";
	if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
	    kobject_add(&c->internal, &c->kobj, "internal"))
		goto err;

	if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
		goto err;

	bch_debug_init_cache_set(c);

	list_add(&c->list, &bch_cache_sets);
found:
	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
		goto err;

	if (ca->sb.seq > c->sb.seq) {
		c->sb.version = ca->sb.version;
		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
		c->sb.flags = ca->sb.flags;
		c->sb.seq = ca->sb.seq;
		pr_debug("set version = %llu", c->sb.version);
	}

	kobject_get(&ca->kobj);
	ca->set = c;
	ca->set->cache[ca->sb.nr_this_dev] = ca;
	c->cache_by_alloc[c->caches_loaded++] = ca;

	if (c->caches_loaded == c->sb.nr_in_set)
		run_cache_set(c);

	return NULL;
err:
	bch_cache_set_unregister(c);
	return err;
}
/* Cache device */
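/*
 * kobject release method for struct cache: detach the cache from its set,
 * free everything cache_alloc() allocated, drop the pinned superblock page
 * and the underlying block device, then release the module reference taken
 * in cache_alloc().
 */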
void bch_cache_release(struct kobject *kobj)
{
	struct cache *ca = container_of(kobj, struct cache, kobj);
	unsigned i;

	if (ca->set) {
		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
		ca->set->cache[ca->sb.nr_this_dev] = NULL;
	}

	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
	kfree(ca->prio_buckets);
	vfree(ca->buckets);

	free_heap(&ca->heap);
	free_fifo(&ca->free_inc);

	for (i = 0; i < RESERVE_NR; i++)
		free_fifo(&ca->free[i]);

	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
		put_page(ca->sb_bio.bi_io_vec[0].bv_page);

	if (!IS_ERR_OR_NULL(ca->bdev))
		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

	kfree(ca);
	module_put(THIS_MODULE);
}
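/*
 * Allocate the in-memory state for a cache device. The free lists, free_inc
 * fifo and bucket heap are all sized from the bucket count (free is roughly
 * nbuckets / 1024, after rounding nbuckets up to a power of two).
 * Returns 0 or -ENOMEM.
 */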
static int cache_alloc(struct cache *ca)
{
	size_t free;
	struct bucket *b;

	__module_get(THIS_MODULE);
	kobject_init(&ca->kobj, &bch_cache_ktype);

	bio_init(&ca->journal.bio);
	ca->journal.bio.bi_max_vecs = 8;
	ca->journal.bio.bi_io_vec = ca->journal.bio.bi_inline_vecs;

	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;

	if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
	    !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
	    !(ca->buckets = vzalloc(sizeof(struct bucket) *
				    ca->sb.nbuckets)) ||
	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
					 2, GFP_KERNEL)) ||
	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
		return -ENOMEM;

	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);

	for_each_bucket(b, ca)
		atomic_set(&b->pin, 0);

	return 0;
}
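/*
 * Bring up a single cache device: copy in the superblock that
 * register_bcache() read, pin the superblock page for later rewrites, pick
 * up the discard setting, allocate the in-memory state and then try to join
 * (or create) its cache set under bch_register_lock.
 */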
static int register_cache(struct cache_sb *sb, struct page *sb_page,
			  struct block_device *bdev, struct cache *ca)
{
	char name[BDEVNAME_SIZE];
	const char *err = NULL; /* must be set for any error case */
	int ret = 0;

	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
	ca->bdev = bdev;
	ca->bdev->bd_holder = ca;

	bio_init(&ca->sb_bio);
	ca->sb_bio.bi_max_vecs = 1;
	ca->sb_bio.bi_io_vec = ca->sb_bio.bi_inline_vecs;
	ca->sb_bio.bi_io_vec[0].bv_page = sb_page;
	get_page(sb_page);

	if (blk_queue_discard(bdev_get_queue(ca->bdev)))
		ca->discard = CACHE_DISCARD(&ca->sb);

	ret = cache_alloc(ca);
	if (ret != 0) {
		if (ret == -ENOMEM)
			err = "cache_alloc(): -ENOMEM";
		else
			err = "cache_alloc(): unknown error";
		goto err;
	}

	if (kobject_add(&ca->kobj, &part_to_dev(bdev->bd_part)->kobj, "bcache")) {
		err = "error calling kobject_add";
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&bch_register_lock);
	err = register_cache_set(ca);
	mutex_unlock(&bch_register_lock);

	if (err) {
		ret = -ENODEV;
		goto out;
	}

	pr_info("registered cache device %s", bdevname(bdev, name));

out:
	kobject_put(&ca->kobj);

err:
	if (err)
		pr_notice("error opening %s: %s", bdevname(bdev, name), err);

	return ret;
}
/* Global interfaces/init */

static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
			       const char *, size_t);

kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
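/*
 * Helpers for register_bcache(): when the exclusive open fails with -EBUSY,
 * these check whether the device is busy because bcache already owns it, so
 * we can report "device already registered" instead of a generic "device busy".
 */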
static bool bch_is_open_backing(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cached_dev *dc, *t;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
			if (dc->bdev == bdev)
				return true;

	list_for_each_entry_safe(dc, t, &uncached_devices, list)
		if (dc->bdev == bdev)
			return true;

	return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
{
	struct cache_set *c, *tc;
	struct cache *ca;
	unsigned i;

	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
		for_each_cache(ca, c, i)
			if (ca->bdev == bdev)
				return true;

	return false;
}

static bool bch_is_open(struct block_device *bdev)
{
	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
}
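/*
 * Store handler for the register/register_quiet attributes under
 * /sys/fs/bcache. Writing a device path, e.g.
 *
 *	echo /dev/sdc > /sys/fs/bcache/register
 *
 * opens the device exclusively, reads its bcache superblock and registers it
 * as either a backing device or a cache device depending on SB_IS_BDEV().
 * register_quiet only differs in staying quiet (and not failing) when the
 * device turns out to be busy.
 */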
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
			       const char *buffer, size_t size)
{
	ssize_t ret = size;
	const char *err = "cannot allocate memory";
	char *path = NULL;
	struct cache_sb *sb = NULL;
	struct block_device *bdev = NULL;
	struct page *sb_page = NULL;

	if (!try_module_get(THIS_MODULE))
		return -EBUSY;

	if (!(path = kstrndup(buffer, size, GFP_KERNEL)) ||
	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
		goto err;

	err = "failed to open device";
	bdev = blkdev_get_by_path(strim(path),
				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				  sb);
	if (IS_ERR(bdev)) {
		if (bdev == ERR_PTR(-EBUSY)) {
			bdev = lookup_bdev(strim(path));
			mutex_lock(&bch_register_lock);
			if (!IS_ERR(bdev) && bch_is_open(bdev))
				err = "device already registered";
			else
				err = "device busy";
			mutex_unlock(&bch_register_lock);
			if (attr == &ksysfs_register_quiet)
				goto out;
		}
		goto err;
	}

	err = "failed to set blocksize";
	if (set_blocksize(bdev, 4096))
		goto err_close;

	err = read_super(sb, bdev, &sb_page);
	if (err)
		goto err_close;

	if (SB_IS_BDEV(sb)) {
		struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
		if (!dc)
			goto err_close;

		mutex_lock(&bch_register_lock);
		register_bdev(sb, sb_page, bdev, dc);
		mutex_unlock(&bch_register_lock);
	} else {
		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca)
			goto err_close;

		if (register_cache(sb, sb_page, bdev, ca) != 0)
			goto err_close;
	}
out:
	if (sb_page)
		put_page(sb_page);
	kfree(sb);
	kfree(path);
	module_put(THIS_MODULE);
	return ret;

err_close:
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
err:
	pr_info("error opening %s: %s", path, err);
	ret = -EINVAL;
	goto out;
}
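/*
 * Reboot notifier: on shutdown, halt or power-off, stop every cache set and
 * every unattached backing device, then wait (roughly two seconds at most)
 * for them to finish tearing down before the underlying disks go away.
 */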
static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
{
	if (code == SYS_DOWN ||
	    code == SYS_HALT ||
	    code == SYS_POWER_OFF) {
		DEFINE_WAIT(wait);
		unsigned long start = jiffies;
		bool stopped = false;

		struct cache_set *c, *tc;
		struct cached_dev *dc, *tdc;

		mutex_lock(&bch_register_lock);

		if (list_empty(&bch_cache_sets) &&
		    list_empty(&uncached_devices))
			goto out;

		pr_info("Stopping all devices:");

		list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
			bch_cache_set_stop(c);

		list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
			bcache_device_stop(&dc->disk);

		/* What's a condition variable? */
		while (1) {
			long timeout = start + 2 * HZ - jiffies;

			stopped = list_empty(&bch_cache_sets) &&
				  list_empty(&uncached_devices);

			if (timeout < 0 || stopped)
				break;

			prepare_to_wait(&unregister_wait, &wait,
					TASK_UNINTERRUPTIBLE);

			mutex_unlock(&bch_register_lock);
			schedule_timeout(timeout);
			mutex_lock(&bch_register_lock);
		}

		finish_wait(&unregister_wait, &wait);

		if (stopped)
			pr_info("All devices stopped");
		else
			pr_notice("Timeout waiting for devices to be closed");
out:
		mutex_unlock(&bch_register_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block reboot = {
	.notifier_call	= bcache_reboot,
	.priority	= INT_MAX, /* before any real devices */
};
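/*
 * Module teardown; also used by bcache_init() as its error path, so every
 * step has to be safe to run against partially initialized state.
 */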
static void bcache_exit(void)
{
	bch_debug_exit();
	bch_request_exit();
	if (bcache_kobj)
		kobject_put(bcache_kobj);
	if (bcache_wq)
		destroy_workqueue(bcache_wq);
	if (bcache_major)
		unregister_blkdev(bcache_major, "bcache");
	unregister_reboot_notifier(&reboot);
}
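/*
 * Module init: register the reboot notifier and a block major, create the
 * bcache workqueue and the /sys/fs/bcache kobject with its register files,
 * then set up the request and debug code. Any failure unwinds through
 * bcache_exit().
 */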
static int __init bcache_init(void)
{
	static const struct attribute *files[] = {
		&ksysfs_register.attr,
		&ksysfs_register_quiet.attr,
		NULL
	};

	mutex_init(&bch_register_lock);
	init_waitqueue_head(&unregister_wait);
	register_reboot_notifier(&reboot);
	closure_debug_init();

	bcache_major = register_blkdev(0, "bcache");
	if (bcache_major < 0) {
		unregister_reboot_notifier(&reboot);
		return bcache_major;
	}

	if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) ||
	    !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
	    sysfs_create_files(bcache_kobj, files) ||
	    bch_request_init() ||
	    bch_debug_init(bcache_kobj))
		goto err;

	return 0;
err:
	bcache_exit();
	return -ENOMEM;
}

module_exit(bcache_exit);
module_init(bcache_init);