/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"

static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
				*pblk_w_rq_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;

static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (pblk_get_secs(bio) > pblk_rl_max_io(&pblk->rl))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}

static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}

static size_t pblk_trans_map_size(struct pblk *pblk)
{
	int entry_size = 8;

	if (pblk->ppaf_bitsize < 32)
		entry_size = 4;

	return entry_size * pblk->rl.nr_secs;
}

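/*
 * Illustrative sizing (hypothetical numbers, not taken from any device):
 * a target exposing 64M sectors with a compact address format
 * (ppaf_bitsize < 32) needs 64M * 4B = 256MB of vmalloc'ed L2P table,
 * while a wide format (>= 32 bits) doubles that with 8B entries.
 */
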
#ifdef CONFIG_NVM_DEBUG
static u32 pblk_l2p_crc(struct pblk *pblk)
{
	size_t map_size;
	u32 crc = ~(u32)0;

	map_size = pblk_trans_map_size(pblk);
	crc = crc32_le(crc, pblk->trans_map, map_size);

	return crc;
}
#endif

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}

static int pblk_l2p_init(struct pblk *pblk)
{
	sector_t i;
	struct ppa_addr ppa;
	size_t map_size;

	map_size = pblk_trans_map_size(pblk);
	pblk->trans_map = vmalloc(map_size);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}

static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries;
	unsigned int power_size, power_seg_sz;

	nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

	entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

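/*
 * Note: the ring buffer size and segment size are handed to
 * pblk_rb_init() as powers of two (get_count_order()), the usual
 * rationale being that buffer index and offset arithmetic in the write
 * hot path can then use shifts and masks instead of divisions.
 */
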
/* Minimum pages needed within a lun */
#define ADDR_POOL_SIZE 64

static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
	if (1 << power_len != geo->nr_chnls) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;

	power_len = get_count_order(geo->luns_per_chnl);
	if (1 << power_len != geo->luns_per_chnl) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

	return 0;
}

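/*
 * Worked example (hypothetical geometry, for illustration only): with
 * sect_len = 2, pln_len = 1, ch_len = 4, lun_len = 2, pg_len = 8 and
 * blk_len = 12, the offsets computed above come out as sec = 0,
 * pln = 2, ch = 3, lun = 7, pg = 9 and blk = 17, giving
 * ppaf_bitsize = 17 + 12 = 29. Since 29 < 32, pblk_trans_map_size()
 * would pick 4-byte L2P entries for such a device.
 */
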
static int pblk_init_global_caches(struct pblk *pblk)
{
	down_write(&pblk_lock);
	pblk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_ws_cache) {
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
	if (!pblk_g_rq_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache) {
		kmem_cache_destroy(pblk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_g_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}
	up_write(&pblk_lock);

	return 0;
}

static void pblk_free_global_caches(struct pblk *pblk)
{
	kmem_cache_destroy(pblk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_g_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
}

static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->nr_luns;

	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	/* Internal bios can be at most the sectors signaled by the device. */
	pblk->page_bio_pool = mempool_create_page_pool(nvm_max_phys_sects(dev),
									0);
	if (!pblk->page_bio_pool)
		goto free_global_caches;

	pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
							pblk_ws_cache);
	if (!pblk->gen_ws_pool)
		goto free_page_bio_pool;

	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_gen_ws_pool;

	pblk->r_rq_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_g_rq_cache);
	if (!pblk->r_rq_pool)
		goto free_rec_pool;

	pblk->e_rq_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_g_rq_cache);
	if (!pblk->e_rq_pool)
		goto free_r_rq_pool;

	pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns,
							pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_e_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_w_rq_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	pblk->r_end_wq = alloc_workqueue("pblk-read-end-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->r_end_wq)
		goto free_bb_wq;

	if (pblk_set_ppaf(pblk))
		goto free_r_end_wq;

	if (pblk_rwb_init(pblk))
		goto free_r_end_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

free_r_end_wq:
	destroy_workqueue(pblk->r_end_wq);
free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_e_rq_pool:
	mempool_destroy(pblk->e_rq_pool);
free_r_rq_pool:
	mempool_destroy(pblk->r_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_gen_ws_pool:
	mempool_destroy(pblk->gen_ws_pool);
free_page_bio_pool:
	mempool_destroy(pblk->page_bio_pool);
free_global_caches:
	pblk_free_global_caches(pblk);
	return -ENOMEM;
}

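/*
 * The mempools above are sized to the minimum number of elements that
 * must remain allocatable under memory pressure: mempool allocations
 * fall back to a pre-allocated reserve when the backing slab cache
 * cannot satisfy them, keeping the write, erase and recovery paths
 * making forward progress during reclaim (hence WQ_MEM_RECLAIM on the
 * workqueues as well).
 */
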
static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->close_wq)
		destroy_workqueue(pblk->close_wq);

	if (pblk->r_end_wq)
		destroy_workqueue(pblk->r_end_wq);

	if (pblk->bb_wq)
		destroy_workqueue(pblk->bb_wq);

	mempool_destroy(pblk->page_bio_pool);
	mempool_destroy(pblk->gen_ws_pool);
	mempool_destroy(pblk->rec_pool);
	mempool_destroy(pblk->r_rq_pool);
	mempool_destroy(pblk->e_rq_pool);
	mempool_destroy(pblk->w_rq_pool);

	pblk_free_global_caches(pblk);
}

static void pblk_luns_free(struct pblk *pblk)
{
	kfree(pblk->luns);
}

static void pblk_free_line_bitmaps(struct pblk_line *line)
{
	kfree(line->blk_bitmap);
	kfree(line->erase_bitmap);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(pblk, line);
		pblk_free_line_bitmaps(line);
	}
	spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);
	kfree(l_mg->vsc_list);

	for (i = 0; i < PBLK_DATA_LINES; i++) {
		kfree(l_mg->sline_meta[i]);
		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
		kfree(l_mg->eline_meta[i]);
	}

	kfree(pblk->lines);
}

static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks, ret;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	rlun->bb_list = blks;

	return 0;
out:
	kfree(blks);
	return ret;
}

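/*
 * The raw bad block table reports one entry per plane, which is why
 * blks_per_lun * plane_mode bytes are allocated above;
 * nvm_bb_tbl_fold() then collapses those into one entry per physical
 * block, with a bad mark on any plane taking precedence.
 */
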
static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
			int blk_per_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int bb_cnt = 0;
	int i;

	for (i = 0; i < blk_per_line; i++) {
		rlun = &pblk->luns[i];
		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
			continue;

		set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
		bb_cnt++;
	}

	return bb_cnt;
}

static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap) {
		kfree(line->blk_bitmap);
		return -ENOMEM;
	}

	return 0;
}

static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i, ret;

	/* TODO: Implement unbalanced LUN support */
	if (geo->luns_per_chnl < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->nr_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->nr_chnls;
		int lun_raw = i / geo->nr_chnls;
		int lunid = lun_raw + ch * geo->luns_per_chnl;

		rlun = &pblk->luns[i];
		rlun->bppa = luns[lunid];

		sema_init(&rlun->wr_sem, 1);

		ret = pblk_bb_discovery(dev, rlun);
		if (ret) {
			while (--i >= 0)
				kfree(pblk->luns[i].bb_list);
			return ret;
		}
	}

	return 0;
}

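/*
 * Striping example (hypothetical 4-channel device with 2 LUNs per
 * channel): the loop above maps rlun 0..7 to lunid 0, 2, 4, 6, 1, 3,
 * 5, 7, so consecutive entries in pblk->luns alternate channels and
 * sequential writes touch every channel before reusing one.
 */
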
static int pblk_lines_configure(struct pblk *pblk, int flags)
{
	struct pblk_line *line = NULL;
	int ret = 0;

	if (!(flags & NVM_TARGET_FACTORY)) {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			ret = -EFAULT;
		}
	}

#ifdef CONFIG_NVM_DEBUG
	pr_info("pblk init: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	/* Free full lines directly as GC has not been started yet */
	pblk_gc_free_full_lines(pblk);

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line) {
			pr_err("pblk: line list corrupted\n");
			ret = -EFAULT;
		}
	}

	return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len,
			geo->sec_size);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->sec_size);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->sec_size);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}

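/*
 * The three components sized above (header plus bad-block bitmap,
 * per-line lba_list, vsc_list) are what the add_emeta_page loop in
 * pblk_lines_init() must fit into emeta_len[0]; each is rounded up to
 * sector granularity so that it can be located independently on media.
 */
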
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;

	pblk->over_pct = 20;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->over_pct);
	sector_div(provisioned, 100);

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
	pblk->capacity = provisioned * geo->sec_per_blk;
	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}

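/*
 * Example: with over_pct = 20 and 1000 free blocks, only
 * 1000 * (100 - 20) / 100 = 800 blocks back the exported capacity;
 * the remaining 200 stay under pblk's control and give garbage
 * collection room to operate without starving user writes.
 */
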
static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

			emeta->buf = vmalloc(lm->emeta_len[0]);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		} else {
			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		}
	}

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail_free_emeta;

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		if (l_mg->emeta_alloc_type == PBLK_VMALLOC_META)
			vfree(l_mg->eline_meta[i]->buf);
		else
			kfree(l_mg->eline_meta[i]->buf);
		kfree(l_mg->eline_meta[i]);
	}

fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);

	return -ENOMEM;
}

static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_free_blks;
	int bb_distance, max_write_ppas, mod;
	int i, ret;

	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	l_mg->nr_lines = geo->blks_per_lun;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
	lm->blk_per_line = geo->nr_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->mid_thrs = lm->sec_per_line / 2;
	lm->high_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

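	/*
	 * Sizing loop example (hypothetical geometry): with sec_per_pl = 8
	 * and sec_size = 4096, each pass grows smeta by one plane-page
	 * (32KB); the loop stops at the first multiple large enough to hold
	 * struct line_smeta plus the LUN bitmap. The emeta loop below works
	 * the same way against calc_emeta_len().
	 */
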
	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->sec_per_pl;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

	lm->emeta_bb = geo->nr_luns > i ? geo->nr_luns - i : 0;

	lm->min_blk_line = 1;
	if (geo->nr_luns > 1)
		lm->min_blk_line += DIV_ROUND_UP(lm->smeta_sec +
					lm->emeta_sec[0], geo->sec_per_blk);

	if (lm->min_blk_line > lm->blk_per_line) {
		pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		ret = -EINVAL;
		goto fail;
	}

	ret = pblk_lines_alloc_metadata(pblk);
	if (ret)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->nr_luns) * geo->sec_per_pl;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

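	/*
	 * Line layout note: a line stripes its sectors across LUNs in runs
	 * of sec_per_pl, so the template above sets one sec_per_pl run every
	 * bb_distance sectors. Shifting the template by a bad block's
	 * position within the line then yields the sector mask occupied by
	 * that block.
	 */
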
	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);

	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		line->vsc = &l_mg->vsc_list[i];
		spin_lock_init(&line->lock);

		ret = pblk_alloc_line_bitmaps(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			pblk_free_line_bitmaps(line);
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	while (--i >= 0)
		pblk_free_line_bitmaps(&pblk->lines[i]);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	pblk_line_meta_free(pblk);
fail:
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}

static int pblk_writer_init(struct pblk *pblk)
{
	setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		pr_err("pblk: could not allocate writer kthread\n");
		return PTR_ERR(pblk->writer_ts);
	}

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	/* The pipeline must be stopped and the write buffer emptied before the
	 * write thread is stopped
	 */
	WARN(pblk_rb_read_count(&pblk->rwb),
			"Stopping not fully persisted write buffer\n");

	WARN(pblk_rb_sync_count(&pblk->rwb),
			"Stopping not fully synced write buffer\n");

	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
	del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
	pblk_luns_free(pblk);
	pblk_lines_free(pblk);
	pblk_line_meta_free(pblk);
	pblk_core_free(pblk);
	pblk_l2p_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
	pblk_pipeline_stop(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rwb_free(pblk);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk);
	pblk_tear_down(pblk);

#ifdef CONFIG_NVM_DEBUG
	pr_info("pblk exit: L2P CRC: %x\n", pblk_l2p_crc(pblk));
#endif

	pblk_free(pblk);
	up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}

static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
		pr_err("pblk: host-side L2P table not supported. (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;
	pblk->state = PBLK_STATE_RUNNING;
	pblk->gc.gc_enabled = 0;

	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

	if (flags & NVM_TARGET_FACTORY)
		pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->nr_flush, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
	atomic_long_set(&pblk->recov_gc_reads, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_luns_init(pblk, dev->luns);
	if (ret) {
		pr_err("pblk: could not initialize luns\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_luns;
	}

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail_free_line_meta;
	}

	ret = pblk_l2p_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_core;
	}

	ret = pblk_lines_configure(pblk, flags);
	if (ret) {
		pr_err("pblk: could not configure lines\n");
		goto fail_free_l2p;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write thread\n");
		goto fail_free_lines;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->nr_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);

	/* Check if we need to start GC */
	pblk_gc_should_kick(pblk);

	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail_free_line_meta:
	pblk_line_meta_free(pblk);
fail_free_luns:
	pblk_luns_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
	.owner		= THIS_MODULE,
};

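/*
 * These hooks tie pblk into the LightNVM target framework: the core
 * calls .init/.exit when a target instance is created or removed, and
 * every bio submitted to the exported block device is routed through
 * .make_rq above.
 */
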
static int __init pblk_module_init(void)
{
	int ret;

	pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!pblk_bio_set)
		return -ENOMEM;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_free(pblk_bio_set);
	return ret;
}

static void pblk_module_exit(void)
{
	bioset_free(pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");