/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"
static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
                                *pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
                          struct bio *bio)
{
        int ret;

        /* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
         * constraint. Writes can be of arbitrary size.
         */
        if (bio_data_dir(bio) == READ) {
                blk_queue_split(q, &bio);
                ret = pblk_submit_read(pblk, bio);
                if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
                        bio_put(bio);

                return ret;
        }

        /* Prevent deadlock in the case of a modest LUN configuration and large
         * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
         * available for user I/O.
         */
        if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
                blk_queue_split(q, &bio);

        return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
        struct pblk *pblk = q->queuedata;

        if (bio_op(bio) == REQ_OP_DISCARD) {
                pblk_discard(pblk, bio);
                if (!(bio->bi_opf & REQ_PREFLUSH)) {
                        bio_endio(bio);
                        return BLK_QC_T_NONE;
                }
        }

        switch (pblk_rw_io(q, pblk, bio)) {
        case NVM_IO_ERR:
                bio_io_error(bio);
                break;
        case NVM_IO_DONE:
                bio_endio(bio);
                break;
        }

        return BLK_QC_T_NONE;
}
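
/*
 * Note: a discard that also carries REQ_PREFLUSH deliberately falls
 * through to pblk_rw_io() above rather than completing immediately, so
 * the flush is ordered through the write buffer like any other flush.
 */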
static size_t pblk_trans_map_size(struct pblk *pblk)
{
        int entry_size = 8;

        if (pblk->ppaf_bitsize < 32)
                entry_size = 4;

        return entry_size * pblk->rl.nr_secs;
}
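
/*
 * Illustrative sizing (hypothetical numbers, not from the original
 * source): with 4KB sectors, a device exposing 2^28 sectors (1 TiB) and
 * a PPA format that fits in 32 bits needs 2^28 * 4B = 1 GiB for the L2P
 * table (vmalloc'ed in pblk_l2p_init() below); 8-byte entries double it.
 */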
#ifdef CONFIG_NVM_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
        size_t map_size;
        u32 crc = ~(u32)0;

        map_size = pblk_trans_map_size(pblk);
        crc = crc32_le(crc, pblk->trans_map, map_size);

        return crc;
}
#endif

static void pblk_l2p_free(struct pblk *pblk)
{
        vfree(pblk->trans_map);
}

static int pblk_l2p_init(struct pblk *pblk)
{
        sector_t i;
        struct ppa_addr ppa;
        size_t map_size;

        map_size = pblk_trans_map_size(pblk);
        pblk->trans_map = vmalloc(map_size);
        if (!pblk->trans_map)
                return -ENOMEM;

        pblk_ppa_set_empty(&ppa);

        for (i = 0; i < pblk->rl.nr_secs; i++)
                pblk_trans_map_set(pblk, i, ppa);

        return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
        if (pblk_rb_tear_down_check(&pblk->rwb))
                pr_err("pblk: write buffer error on tear down\n");

        pblk_rb_data_free(&pblk->rwb);
        vfree(pblk_rb_entries_ref(&pblk->rwb));
}
static int pblk_rwb_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_rb_entry *entries;
        unsigned long nr_entries;
        unsigned int power_size, power_seg_sz;

        nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

        entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
        if (!entries)
                return -ENOMEM;

        power_size = get_count_order(nr_entries);
        power_seg_sz = get_count_order(geo->sec_size);

        return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}
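
/*
 * Illustrative: the ring buffer works on power-of-two sizes, so both the
 * entry count and the segment size are passed as exponents. For a common
 * 4096-byte sector, get_count_order(4096) == 12; a buffer rounded up to
 * 2^15 entries would be passed as power_size == 15.
 */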
/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64

static int pblk_set_ppaf(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct nvm_addr_format ppaf = geo->ppaf;
        int power_len;

        /* Re-calculate channel and lun format to adapt to configuration */
        power_len = get_count_order(geo->nr_chnls);
        if (1 << power_len != geo->nr_chnls) {
                pr_err("pblk: supports only power-of-two channel config.\n");
                return -EINVAL;
        }
        ppaf.ch_len = power_len;

        power_len = get_count_order(geo->nr_luns);
        if (1 << power_len != geo->nr_luns) {
                pr_err("pblk: supports only power-of-two LUN config.\n");
                return -EINVAL;
        }
        ppaf.lun_len = power_len;

        pblk->ppaf.sec_offset = 0;
        pblk->ppaf.pln_offset = ppaf.sect_len;
        pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
        pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
        pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
        pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
        pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
        pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
                                                        pblk->ppaf.pln_offset;
        pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
                                                        pblk->ppaf.ch_offset;
        pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
                                                        pblk->ppaf.lun_offset;
        pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
                                                        pblk->ppaf.pg_offset;
        pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
                                                        pblk->ppaf.blk_offset;

        pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

        return 0;
}
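
/*
 * Worked example (hypothetical geometry, not from the original source):
 * with sect_len = 2, pln_len = 1, ch_len = 4, lun_len = 2 and pg_len = 9,
 * the offsets come out as sec = 0, pln = 2, ch = 3, lun = 7, pg = 9 and
 * blk = 18; with blk_len = 12, ppaf_bitsize is 30, so the L2P table can
 * use 4-byte entries (see pblk_trans_map_size() above).
 */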
static int pblk_init_global_caches(struct pblk *pblk)
{
        down_write(&pblk_lock);
        pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
                                sizeof(struct pblk_line_ws), 0, 0, NULL);
        if (!pblk_ws_cache) {
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_rec_cache = kmem_cache_create("pblk_rec",
                                sizeof(struct pblk_rec_ctx), 0, 0, NULL);
        if (!pblk_rec_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
                                0, 0, NULL);
        if (!pblk_g_rq_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                kmem_cache_destroy(pblk_rec_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }

        pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
                                0, 0, NULL);
        if (!pblk_w_rq_cache) {
                kmem_cache_destroy(pblk_ws_cache);
                kmem_cache_destroy(pblk_rec_cache);
                kmem_cache_destroy(pblk_g_rq_cache);
                up_write(&pblk_lock);
                return -ENOMEM;
        }
        up_write(&pblk_lock);

        return 0;
}
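
/*
 * Note: these caches are file-scope statics shared across pblk
 * instances; creation here and the destroys in
 * pblk_free_global_caches() below are serialized with pblk_lock.
 */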
static void pblk_free_global_caches(struct pblk *pblk)
{
        kmem_cache_destroy(pblk_ws_cache);
        kmem_cache_destroy(pblk_rec_cache);
        kmem_cache_destroy(pblk_g_rq_cache);
        kmem_cache_destroy(pblk_w_rq_cache);
}

static int pblk_core_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
                                                geo->nr_planes * geo->all_luns;

        if (pblk_init_global_caches(pblk))
                return -ENOMEM;

        /* Internal bios can be at most the sectors signaled by the device. */
        pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
                                                                        0);
        if (!pblk->page_bio_pool)
                goto free_global_caches;

        pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
                                                        pblk_ws_cache);
        if (!pblk->gen_ws_pool)
                goto free_page_bio_pool;

        pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
                                                        pblk_rec_cache);
        if (!pblk->rec_pool)
                goto free_gen_ws_pool;

        pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
                                                        pblk_g_rq_cache);
        if (!pblk->r_rq_pool)
                goto free_rec_pool;

        pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
                                                        pblk_g_rq_cache);
        if (!pblk->e_rq_pool)
                goto free_r_rq_pool;

        pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
                                                        pblk_w_rq_cache);
        if (!pblk->w_rq_pool)
                goto free_e_rq_pool;

        pblk->close_wq = alloc_workqueue("pblk-close-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
        if (!pblk->close_wq)
                goto free_w_rq_pool;

        pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->bb_wq)
                goto free_close_wq;

        pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
        if (!pblk->r_end_wq)
                goto free_bb_wq;

        if (pblk_set_ppaf(pblk))
                goto free_r_end_wq;

        if (pblk_rwb_init(pblk))
                goto free_r_end_wq;

        INIT_LIST_HEAD(&pblk->compl_list);
        return 0;

free_r_end_wq:
        destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
        destroy_workqueue(pblk->bb_wq);
free_close_wq:
        destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
        mempool_destroy(pblk->w_rq_pool);
free_e_rq_pool:
        mempool_destroy(pblk->e_rq_pool);
free_r_rq_pool:
        mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
        mempool_destroy(pblk->rec_pool);
free_gen_ws_pool:
        mempool_destroy(pblk->gen_ws_pool);
free_page_bio_pool:
        mempool_destroy(pblk->page_bio_pool);
free_global_caches:
        pblk_free_global_caches(pblk);
        return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
        if (pblk->close_wq)
                destroy_workqueue(pblk->close_wq);

        if (pblk->r_end_wq)
                destroy_workqueue(pblk->r_end_wq);

        if (pblk->bb_wq)
                destroy_workqueue(pblk->bb_wq);

        mempool_destroy(pblk->page_bio_pool);
        mempool_destroy(pblk->gen_ws_pool);
        mempool_destroy(pblk->rec_pool);
        mempool_destroy(pblk->r_rq_pool);
        mempool_destroy(pblk->e_rq_pool);
        mempool_destroy(pblk->w_rq_pool);

        pblk_rwb_free(pblk);
        pblk_free_global_caches(pblk);
}
static void pblk_luns_free(struct pblk *pblk)
{
        kfree(pblk->luns);
}

static void pblk_free_line_bitmaps(struct pblk_line *line)
{
        kfree(line->blk_bitmap);
        kfree(line->erase_bitmap);
}

static void pblk_lines_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line *line;
        int i;

        spin_lock(&l_mg->free_lock);
        for (i = 0; i < l_mg->nr_lines; i++) {
                line = &pblk->lines[i];

                pblk_line_free(pblk, line);
                pblk_free_line_bitmaps(line);
        }
        spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        int i;

        kfree(l_mg->bb_template);
        kfree(l_mg->bb_aux);
        kfree(l_mg->vsc_list);

        for (i = 0; i < PBLK_DATA_LINES; i++) {
                kfree(l_mg->sline_meta[i]);
                pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
                kfree(l_mg->eline_meta[i]);
        }

        kfree(pblk->lines);
}

static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
        struct nvm_geo *geo = &dev->geo;
        struct ppa_addr ppa;
        u8 *blks;
        int nr_blks, ret;

        nr_blks = geo->nr_chks * geo->plane_mode;
        blks = kmalloc(nr_blks, GFP_KERNEL);
        if (!blks)
                return -ENOMEM;

        ppa.ppa = 0;
        ppa.g.ch = rlun->bppa.g.ch;
        ppa.g.lun = rlun->bppa.g.lun;

        ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
        if (ret)
                goto out;

        nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
        if (nr_blks < 0) {
                ret = nr_blks;
                goto out;
        }

        rlun->bb_list = blks;

        return 0;
out:
        kfree(blks);
        return ret;
}
static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
                        int blk_per_line)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int bb_cnt = 0;
        int i;

        for (i = 0; i < blk_per_line; i++) {
                rlun = &pblk->luns[i];
                if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
                        continue;

                set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
                bb_cnt++;
        }

        return bb_cnt;
}

static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_meta *lm = &pblk->lm;

        line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->blk_bitmap)
                return -ENOMEM;

        line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
        if (!line->erase_bitmap) {
                kfree(line->blk_bitmap);
                return -ENOMEM;
        }

        return 0;
}

static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_lun *rlun;
        int i, ret;

        /* TODO: Implement unbalanced LUN support */
        if (geo->nr_luns < 0) {
                pr_err("pblk: unbalanced LUN config.\n");
                return -EINVAL;
        }

        pblk->luns = kcalloc(geo->all_luns, sizeof(struct pblk_lun),
                                                                GFP_KERNEL);
        if (!pblk->luns)
                return -ENOMEM;

        for (i = 0; i < geo->all_luns; i++) {
                /* Stripe across channels */
                int ch = i % geo->nr_chnls;
                int lun_raw = i / geo->nr_chnls;
                int lunid = lun_raw + ch * geo->nr_luns;

                rlun = &pblk->luns[i];
                rlun->bppa = luns[lunid];

                sema_init(&rlun->wr_sem, 1);

                ret = pblk_bb_discovery(dev, rlun);
                if (ret) {
                        while (--i >= 0)
                                kfree(pblk->luns[i].bb_list);
                        return ret;
                }
        }

        return 0;
}
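
/*
 * Illustrative: with nr_chnls = 4 and nr_luns = 8 per channel,
 * consecutive pblk LUN indexes i = 0..3 map to lunid 0, 8, 16, 24,
 * i.e. LUN 0 of each channel, before moving on to LUN 1 of each.
 * Writes are thereby striped across channels first.
 */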
static int pblk_lines_configure(struct pblk *pblk, int flags)
{
        struct pblk_line *line = NULL;
        int ret = 0;

        if (!(flags & NVM_TARGET_FACTORY)) {
                line = pblk_recov_l2p(pblk);
                if (IS_ERR(line)) {
                        pr_err("pblk: could not recover l2p table\n");
                        ret = -EFAULT;
                }
        }

#ifdef CONFIG_NVM_DEBUG
        pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

        /* Free full lines directly as GC has not been started yet */
        pblk_gc_free_full_lines(pblk);

        if (!line) {
                /* Configure next line for user data */
                line = pblk_line_get_first_data(pblk);
                if (!line) {
                        pr_err("pblk: line list corrupted\n");
                        ret = -EFAULT;
                }
        }

        return ret;
}
/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;

        /* Round to sector size so that lba_list starts on its own sector */
        lm->emeta_sec[1] = DIV_ROUND_UP(
                        sizeof(struct line_emeta) + lm->blk_bitmap_len,
                        geo->sec_size);
        lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;

        /* Round to sector size so that vsc_list starts on its own sector */
        lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
        lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
                        geo->sec_size);
        lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;

        lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
                        geo->sec_size);
        lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;

        lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

        return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}
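
/*
 * Note: emeta_len[0] is the on-media footprint and must cover the sum
 * returned here; it is rounded up in units of geo->sec_per_pl by the
 * add_emeta_page loop in pblk_lines_init() below.
 */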
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_geo *geo = &dev->geo;
        sector_t provisioned;
        int sec_meta, blk_meta;

        if (geo->op == NVM_TARGET_DEFAULT_OP)
                pblk->op = PBLK_DEFAULT_OP;
        else
                pblk->op = geo->op;

        provisioned = nr_free_blks;
        provisioned *= (100 - pblk->op);
        sector_div(provisioned, 100);

        pblk->op_blks = nr_free_blks - provisioned;

        /* Internally pblk manages all free blocks, but all calculations based
         * on user capacity consider only provisioned blocks
         */
        pblk->rl.total_blocks = nr_free_blks;
        pblk->rl.nr_secs = nr_free_blks * geo->sec_per_chk;

        /* Consider sectors used for metadata */
        sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
        blk_meta = DIV_ROUND_UP(sec_meta, geo->sec_per_chk);

        pblk->capacity = (provisioned - blk_meta) * geo->sec_per_chk;

        atomic_set(&pblk->rl.free_blocks, nr_free_blks);
        atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
}
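
/*
 * Worked example (hypothetical numbers): with nr_free_blks = 1000 and an
 * over-provisioning ratio of 11%, provisioned = 1000 * 89 / 100 = 890
 * and op_blks = 110. If metadata then consumes the equivalent of 10
 * blocks, the exported capacity is (890 - 10) * sec_per_chk sectors.
 */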
static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        int i;

        /* smeta is always small enough to fit on a kmalloc memory allocation,
         * emeta depends on the number of LUNs allocated to the pblk instance
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
                if (!l_mg->sline_meta[i])
                        goto fail_free_smeta;
        }

        /* emeta allocates three different buffers for managing metadata with
         * in-memory and in-media layouts
         */
        for (i = 0; i < PBLK_DATA_LINES; i++) {
                struct pblk_emeta *emeta;

                emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
                if (!emeta)
                        goto fail_free_emeta;

                if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
                        l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

                        emeta->buf = vmalloc(lm->emeta_len[0]);
                        if (!emeta->buf) {
                                kfree(emeta);
                                goto fail_free_emeta;
                        }

                        emeta->nr_entries = lm->emeta_sec[0];
                        l_mg->eline_meta[i] = emeta;
                } else {
                        l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

                        emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
                        if (!emeta->buf) {
                                kfree(emeta);
                                goto fail_free_emeta;
                        }

                        emeta->nr_entries = lm->emeta_sec[0];
                        l_mg->eline_meta[i] = emeta;
                }
        }

        l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
        if (!l_mg->vsc_list)
                goto fail_free_emeta;

        for (i = 0; i < l_mg->nr_lines; i++)
                l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

        return 0;

fail_free_emeta:
        while (--i >= 0) {
                if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
                        vfree(l_mg->eline_meta[i]->buf);
                else
                        kfree(l_mg->eline_meta[i]->buf);
                kfree(l_mg->eline_meta[i]);
        }

fail_free_smeta:
        for (i = 0; i < PBLK_DATA_LINES; i++)
                kfree(l_mg->sline_meta[i]);

        return -ENOMEM;
}
static int pblk_lines_init(struct pblk *pblk)
{
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_line *line;
        unsigned int smeta_len, emeta_len;
        long nr_bad_blks, nr_free_blks;
        int bb_distance, max_write_ppas, mod;
        int i, ret;

        pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
        max_write_ppas = pblk->min_write_pgs * geo->all_luns;
        pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
                                max_write_ppas : nvm_max_phys_sects(dev);
        pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

        if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
                pr_err("pblk: cannot support device max_phys_sect\n");
                return -EINVAL;
        }

        div_u64_rem(geo->sec_per_chk, pblk->min_write_pgs, &mod);
        if (mod) {
                pr_err("pblk: bad configuration of sectors/pages\n");
                return -EINVAL;
        }

        l_mg->nr_lines = geo->nr_chks;
        l_mg->log_line = l_mg->data_line = NULL;
        l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
        l_mg->nr_free_lines = 0;
        bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

        lm->sec_per_line = geo->sec_per_chk * geo->all_luns;
        lm->blk_per_line = geo->all_luns;
        lm->blk_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
        lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
        lm->lun_bitmap_len = BITS_TO_LONGS(geo->all_luns) * sizeof(long);
        lm->mid_thrs = lm->sec_per_line / 2;
        lm->high_thrs = lm->sec_per_line / 4;
        lm->meta_distance = (geo->all_luns / 2) * pblk->min_write_pgs;

        /* Calculate necessary pages for smeta. See comment over struct
         * line_smeta definition
         */
        i = 1;
add_smeta_page:
        lm->smeta_sec = i * geo->sec_per_pl;
        lm->smeta_len = lm->smeta_sec * geo->sec_size;

        smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
        if (smeta_len > lm->smeta_len) {
                i++;
                goto add_smeta_page;
        }

        /* Calculate necessary pages for emeta. See comment over struct
         * line_emeta definition
         */
        i = 1;
add_emeta_page:
        lm->emeta_sec[0] = i * geo->sec_per_pl;
        lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

        emeta_len = calc_emeta_len(pblk);
        if (emeta_len > lm->emeta_len[0]) {
                i++;
                goto add_emeta_page;
        }

        lm->emeta_bb = geo->all_luns > i ? geo->all_luns - i : 0;

        lm->min_blk_line = 1;
        if (geo->all_luns > 1)
                lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
                                        lm->emeta_sec[0], geo->sec_per_chk);

        if (lm->min_blk_line > lm->blk_per_line) {
                pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
                                                        lm->blk_per_line);
                ret = -EINVAL;
                goto fail;
        }

        ret = pblk_lines_alloc_metadata(pblk);
        if (ret)
                goto fail;

        l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_template) {
                ret = -ENOMEM;
                goto fail_free_meta;
        }

        l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!l_mg->bb_aux) {
                ret = -ENOMEM;
                goto fail_free_bb_template;
        }

        bb_distance = (geo->all_luns) * geo->sec_per_pl;
        for (i = 0; i < lm->sec_per_line; i += bb_distance)
                bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

        INIT_LIST_HEAD(&l_mg->free_list);
        INIT_LIST_HEAD(&l_mg->corrupt_list);
        INIT_LIST_HEAD(&l_mg->bad_list);
        INIT_LIST_HEAD(&l_mg->gc_full_list);
        INIT_LIST_HEAD(&l_mg->gc_high_list);
        INIT_LIST_HEAD(&l_mg->gc_mid_list);
        INIT_LIST_HEAD(&l_mg->gc_low_list);
        INIT_LIST_HEAD(&l_mg->gc_empty_list);

        INIT_LIST_HEAD(&l_mg->emeta_list);

        l_mg->gc_lists[0] = &l_mg->gc_high_list;
        l_mg->gc_lists[1] = &l_mg->gc_mid_list;
        l_mg->gc_lists[2] = &l_mg->gc_low_list;

        spin_lock_init(&l_mg->free_lock);
        spin_lock_init(&l_mg->close_lock);
        spin_lock_init(&l_mg->gc_lock);

        pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
                                                                GFP_KERNEL);
        if (!pblk->lines) {
                ret = -ENOMEM;
                goto fail_free_bb_aux;
        }

        nr_free_blks = 0;
        for (i = 0; i < l_mg->nr_lines; i++) {
                int blk_in_line;

                line = &pblk->lines[i];

                line->pblk = pblk;
                line->id = i;
                line->type = PBLK_LINETYPE_FREE;
                line->state = PBLK_LINESTATE_FREE;
                line->gc_group = PBLK_LINEGC_NONE;
                line->vsc = &l_mg->vsc_list[i];
                spin_lock_init(&line->lock);

                ret = pblk_alloc_line_bitmaps(pblk, line);
                if (ret)
                        goto fail_free_lines;

                nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
                if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
                        pblk_free_line_bitmaps(line);
                        ret = -EINVAL;
                        goto fail_free_lines;
                }

                blk_in_line = lm->blk_per_line - nr_bad_blks;
                if (blk_in_line < lm->min_blk_line) {
                        line->state = PBLK_LINESTATE_BAD;
                        list_add_tail(&line->list, &l_mg->bad_list);
                        continue;
                }

                nr_free_blks += blk_in_line;
                atomic_set(&line->blk_in_line, blk_in_line);

                l_mg->nr_free_lines++;
                list_add_tail(&line->list, &l_mg->free_list);
        }

        pblk_set_provision(pblk, nr_free_blks);

        /* Cleanup per-LUN bad block lists - managed within lines on run-time */
        for (i = 0; i < geo->all_luns; i++)
                kfree(pblk->luns[i].bb_list);

        return 0;

fail_free_lines:
        while (--i >= 0)
                pblk_free_line_bitmaps(&pblk->lines[i]);
fail_free_bb_aux:
        kfree(l_mg->bb_aux);
fail_free_bb_template:
        kfree(l_mg->bb_template);
fail_free_meta:
        pblk_line_meta_free(pblk);
fail:
        for (i = 0; i < geo->all_luns; i++)
                kfree(pblk->luns[i].bb_list);

        return ret;
}
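
/*
 * Illustrative line geometry (hypothetical): a device with 64 LUNs and
 * 4096 sectors per chunk yields lines of 64 blocks and 256K sectors,
 * putting the GC thresholds above at 128K (mid) and 64K (high) valid
 * sectors per line.
 */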
static int pblk_writer_init(struct pblk *pblk)
{
        pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
        if (IS_ERR(pblk->writer_ts)) {
                int err = PTR_ERR(pblk->writer_ts);

                if (err != -EINTR)
                        pr_err("pblk: could not allocate writer kthread (%d)\n",
                                        err);
                return err;
        }

        timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
        mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

        return 0;
}
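
/*
 * Note: kthread_create() returns ERR_PTR(-EINTR) when the creating task
 * is interrupted by a fatal signal, which is why -EINTR is not reported
 * as an error here (nor by the caller in pblk_init()).
 */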
static void pblk_writer_stop(struct pblk *pblk)
{
        /* The pipeline must be stopped and the write buffer emptied before the
         * write thread is stopped
         */
        WARN(pblk_rb_read_count(&pblk->rwb),
                        "Stopping not fully persisted write buffer\n");
        WARN(pblk_rb_sync_count(&pblk->rwb),
                        "Stopping not fully synced write buffer\n");

        if (pblk->writer_ts)
                kthread_stop(pblk->writer_ts);
        del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
        pblk_luns_free(pblk);
        pblk_lines_free(pblk);
        pblk_line_meta_free(pblk);
        pblk_core_free(pblk);
        pblk_l2p_free(pblk);

        kfree(pblk);
}
static void pblk_tear_down(struct pblk *pblk)
{
        pblk_pipeline_stop(pblk);
        pblk_writer_stop(pblk);
        pblk_rb_sync_l2p(&pblk->rwb);
        pblk_rl_free(&pblk->rl);

        pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
        struct pblk *pblk = private;

        down_write(&pblk_lock);
        pblk_gc_exit(pblk);
        pblk_tear_down(pblk);

#ifdef CONFIG_NVM_DEBUG
        pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

        pblk_free(pblk);
        up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
        struct pblk *pblk = private;

        return pblk->capacity * NR_PHY_IN_LOG;
}
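
/*
 * Note: pblk->capacity counts device sectors; NR_PHY_IN_LOG scales them
 * to the 512B logical sectors the block layer expects (assuming the
 * usual 4096/512 layout, a factor of 8).
 */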
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
                       int flags)
{
        struct nvm_geo *geo = &dev->geo;
        struct request_queue *bqueue = dev->q;
        struct request_queue *tqueue = tdisk->queue;
        struct pblk *pblk;
        int ret;

        if (dev->identity.dom & NVM_RSP_L2P) {
                pr_err("pblk: host-side L2P table not supported. (%x)\n",
                                                        dev->identity.dom);
                return ERR_PTR(-EINVAL);
        }

        pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
        if (!pblk)
                return ERR_PTR(-ENOMEM);

        pblk->dev = dev;
        pblk->disk = tdisk;
        pblk->state = PBLK_STATE_RUNNING;
        pblk->gc.gc_enabled = 0;

        spin_lock_init(&pblk->trans_lock);
        spin_lock_init(&pblk->lock);

        if (flags & NVM_TARGET_FACTORY)
                pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
        atomic_long_set(&pblk->inflight_writes, 0);
        atomic_long_set(&pblk->padded_writes, 0);
        atomic_long_set(&pblk->padded_wb, 0);
        atomic_long_set(&pblk->nr_flush, 0);
        atomic_long_set(&pblk->req_writes, 0);
        atomic_long_set(&pblk->sub_writes, 0);
        atomic_long_set(&pblk->sync_writes, 0);
        atomic_long_set(&pblk->inflight_reads, 0);
        atomic_long_set(&pblk->cache_reads, 0);
        atomic_long_set(&pblk->sync_reads, 0);
        atomic_long_set(&pblk->recov_writes, 0);
        atomic_long_set(&pblk->recov_gc_writes, 0);
        atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

        atomic_long_set(&pblk->read_failed, 0);
        atomic_long_set(&pblk->read_empty, 0);
        atomic_long_set(&pblk->read_high_ecc, 0);
        atomic_long_set(&pblk->read_failed_gc, 0);
        atomic_long_set(&pblk->write_failed, 0);
        atomic_long_set(&pblk->erase_failed, 0);

        ret = pblk_luns_init(pblk, dev->luns);
        if (ret) {
                pr_err("pblk: could not initialize luns\n");
                goto fail;
        }

        ret = pblk_lines_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize lines\n");
                goto fail_free_luns;
        }

        ret = pblk_core_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize core\n");
                goto fail_free_line_meta;
        }

        ret = pblk_l2p_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize maps\n");
                goto fail_free_core;
        }

        ret = pblk_lines_configure(pblk, flags);
        if (ret) {
                pr_err("pblk: could not configure lines\n");
                goto fail_free_l2p;
        }

        ret = pblk_writer_init(pblk);
        if (ret) {
                if (ret != -EINTR)
                        pr_err("pblk: could not initialize write thread\n");
                goto fail_free_lines;
        }

        ret = pblk_gc_init(pblk);
        if (ret) {
                pr_err("pblk: could not initialize gc\n");
                goto fail_stop_writer;
        }

        /* inherit the size from the underlying device */
        blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
        blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

        blk_queue_write_cache(tqueue, true, false);

        tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
        tqueue->limits.discard_alignment = 0;
        blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

        pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
                        tdisk->disk_name,
                        geo->all_luns, pblk->l_mg.nr_lines,
                        (unsigned long long)pblk->rl.nr_secs,
                        pblk->rwb.nr_entries);

        wake_up_process(pblk->writer_ts);

        /* Check if we need to start GC */
        pblk_gc_should_kick(pblk);

        return pblk;

fail_stop_writer:
        pblk_writer_stop(pblk);
fail_free_lines:
        pblk_lines_free(pblk);
fail_free_l2p:
        pblk_l2p_free(pblk);
fail_free_core:
        pblk_core_free(pblk);
fail_free_line_meta:
        pblk_line_meta_free(pblk);
fail_free_luns:
        pblk_luns_free(pblk);
fail:
        kfree(pblk);
        return ERR_PTR(ret);
}
/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
        .name           = "pblk",
        .version        = {1, 0, 0},

        .make_rq        = pblk_make_rq,
        .capacity       = pblk_capacity,

        .init           = pblk_init,
        .exit           = pblk_exit,

        .sysfs_init     = pblk_sysfs_init,
        .sysfs_exit     = pblk_sysfs_exit,
        .owner          = THIS_MODULE,
};

static int __init pblk_module_init(void)
{
        int ret;

        pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
        if (!pblk_bio_set)
                return -ENOMEM;
        ret = nvm_register_tgt_type(&tt_pblk);
        if (ret)
                bioset_free(pblk_bio_set);
        return ret;
}

static void pblk_module_exit(void)
{
        bioset_free(pblk_bio_set);
        nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");