pblk-init.c

/*
 * Copyright (C) 2015 IT University of Copenhagen (rrpc.c)
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a physical block-device target for Open-channel SSDs.
 *
 * pblk-init.c - pblk's initialization.
 */

#include "pblk.h"
static struct kmem_cache *pblk_blk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
				*pblk_w_rq_cache, *pblk_line_meta_cache;
static DECLARE_RWSEM(pblk_lock);
struct bio_set *pblk_bio_set;
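
/*
 * Entry path for user reads and writes. Reads are split and submitted
 * directly to the device; writes are staged in the write buffer and
 * persisted asynchronously by the write thread.
 */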
static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
			  struct bio *bio)
{
	int ret;

	/* Read requests must be <= 256kb due to NVMe's 64 bit completion bitmap
	 * constraint. Writes can be of arbitrary size.
	 */
	if (bio_data_dir(bio) == READ) {
		blk_queue_split(q, &bio);
		ret = pblk_submit_read(pblk, bio);
		if (ret == NVM_IO_DONE && bio_flagged(bio, BIO_CLONED))
			bio_put(bio);

		return ret;
	}

	/* Prevent deadlock in the case of a modest LUN configuration and large
	 * user I/Os. Unless stalled, the rate limiter leaves at least 256KB
	 * available for user I/O.
	 */
	if (unlikely(pblk_get_secs(bio) >= pblk_rl_sysfs_rate_show(&pblk->rl)))
		blk_queue_split(q, &bio);

	return pblk_write_to_cache(pblk, bio, PBLK_IOTYPE_USER);
}
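
/*
 * ->make_rq hook registered in tt_pblk below. Discards are dispatched
 * inline; a discard carrying REQ_PREFLUSH falls through to pblk_rw_io()
 * so that its flush semantics are honored by the write path.
 */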
static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio)
{
	struct pblk *pblk = q->queuedata;

	if (bio_op(bio) == REQ_OP_DISCARD) {
		pblk_discard(pblk, bio);
		if (!(bio->bi_opf & REQ_PREFLUSH)) {
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}
	}

	switch (pblk_rw_io(q, pblk, bio)) {
	case NVM_IO_ERR:
		bio_io_error(bio);
		break;
	case NVM_IO_DONE:
		bio_endio(bio);
		break;
	}

	return BLK_QC_T_NONE;
}

static void pblk_l2p_free(struct pblk *pblk)
{
	vfree(pblk->trans_map);
}
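
/*
 * The L2P (logical-to-physical) table keeps one entry per exposed
 * sector. When the recalculated PPA format fits in 32 bits, 4-byte
 * entries halve the table's footprint: e.g., a hypothetical instance
 * exposing 2^28 4KB sectors (1TB) needs 1GB of map at 4 bytes/entry
 * instead of 2GB at 8. All entries start out empty (unmapped).
 */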
static int pblk_l2p_init(struct pblk *pblk)
{
	sector_t i;
	struct ppa_addr ppa;
	int entry_size = 8;

	if (pblk->ppaf_bitsize < 32)
		entry_size = 4;

	pblk->trans_map = vmalloc(entry_size * pblk->rl.nr_secs);
	if (!pblk->trans_map)
		return -ENOMEM;

	pblk_ppa_set_empty(&ppa);

	for (i = 0; i < pblk->rl.nr_secs; i++)
		pblk_trans_map_set(pblk, i, ppa);

	return 0;
}

static void pblk_rwb_free(struct pblk *pblk)
{
	if (pblk_rb_tear_down_check(&pblk->rwb))
		pr_err("pblk: write buffer error on tear down\n");

	pblk_rb_data_free(&pblk->rwb);
	vfree(pblk_rb_entries_ref(&pblk->rwb));
}
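
/*
 * The write buffer is a power-of-two ring so that its pointers can wrap
 * with a cheap mask; get_count_order() computes the rounded-up log2 of
 * the entry count and of the sector size that pblk_rb_init() expects.
 */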
static int pblk_rwb_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_rb_entry *entries;
	unsigned long nr_entries;
	unsigned int power_size, power_seg_sz;

	nr_entries = pblk_rb_calculate_size(pblk->pgs_in_buffer);

	entries = vzalloc(nr_entries * sizeof(struct pblk_rb_entry));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(geo->sec_size);

	return pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
}

/* Minimum pages needed within a lun */
#define PAGE_POOL_SIZE 16
#define ADDR_POOL_SIZE 64
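
/*
 * Recompute the device's PPA format into pblk's own layout. From least
 * to most significant bits the fields are: sector, plane, channel, LUN,
 * page, block. Channel and LUN counts must be powers of two so each
 * field reduces to a shift and a mask. E.g., on a hypothetical geometry
 * with 2 sector, 1 plane, 4 channel, 3 LUN, 9 page and 12 block bits,
 * ppaf_bitsize is 31 and the L2P table can use 4-byte entries.
 */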
static int pblk_set_ppaf(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct nvm_addr_format ppaf = geo->ppaf;
	int power_len;

	/* Re-calculate channel and lun format to adapt to configuration */
	power_len = get_count_order(geo->nr_chnls);
	if (1 << power_len != geo->nr_chnls) {
		pr_err("pblk: supports only power-of-two channel config.\n");
		return -EINVAL;
	}
	ppaf.ch_len = power_len;

	power_len = get_count_order(geo->luns_per_chnl);
	if (1 << power_len != geo->luns_per_chnl) {
		pr_err("pblk: supports only power-of-two LUN config.\n");
		return -EINVAL;
	}
	ppaf.lun_len = power_len;

	pblk->ppaf.sec_offset = 0;
	pblk->ppaf.pln_offset = ppaf.sect_len;
	pblk->ppaf.ch_offset = pblk->ppaf.pln_offset + ppaf.pln_len;
	pblk->ppaf.lun_offset = pblk->ppaf.ch_offset + ppaf.ch_len;
	pblk->ppaf.pg_offset = pblk->ppaf.lun_offset + ppaf.lun_len;
	pblk->ppaf.blk_offset = pblk->ppaf.pg_offset + ppaf.pg_len;
	pblk->ppaf.sec_mask = (1ULL << ppaf.sect_len) - 1;
	pblk->ppaf.pln_mask = ((1ULL << ppaf.pln_len) - 1) <<
							pblk->ppaf.pln_offset;
	pblk->ppaf.ch_mask = ((1ULL << ppaf.ch_len) - 1) <<
							pblk->ppaf.ch_offset;
	pblk->ppaf.lun_mask = ((1ULL << ppaf.lun_len) - 1) <<
							pblk->ppaf.lun_offset;
	pblk->ppaf.pg_mask = ((1ULL << ppaf.pg_len) - 1) <<
							pblk->ppaf.pg_offset;
	pblk->ppaf.blk_mask = ((1ULL << ppaf.blk_len) - 1) <<
							pblk->ppaf.blk_offset;

	pblk->ppaf_bitsize = pblk->ppaf.blk_offset + ppaf.blk_len;

	return 0;
}
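
/*
 * These slab caches are module-wide statics, created under the
 * pblk_lock write side. Only the line metadata cache is named per
 * instance, as its object size depends on this instance's line
 * geometry (lm.sec_bitmap_len).
 */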
static int pblk_init_global_caches(struct pblk *pblk)
{
	char cache_name[PBLK_CACHE_NAME_LEN];

	down_write(&pblk_lock);
	pblk_blk_ws_cache = kmem_cache_create("pblk_blk_ws",
				sizeof(struct pblk_line_ws), 0, 0, NULL);
	if (!pblk_blk_ws_cache) {
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_rec_cache = kmem_cache_create("pblk_rec",
				sizeof(struct pblk_rec_ctx), 0, 0, NULL);
	if (!pblk_rec_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_g_rq_cache = kmem_cache_create("pblk_g_rq", pblk_g_rq_size,
				0, 0, NULL);
	if (!pblk_g_rq_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	pblk_w_rq_cache = kmem_cache_create("pblk_w_rq", pblk_w_rq_size,
				0, 0, NULL);
	if (!pblk_w_rq_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_g_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}

	snprintf(cache_name, sizeof(cache_name), "pblk_line_m_%s",
							pblk->disk->disk_name);
	pblk_line_meta_cache = kmem_cache_create(cache_name,
				pblk->lm.sec_bitmap_len, 0, 0, NULL);
	if (!pblk_line_meta_cache) {
		kmem_cache_destroy(pblk_blk_ws_cache);
		kmem_cache_destroy(pblk_rec_cache);
		kmem_cache_destroy(pblk_g_rq_cache);
		kmem_cache_destroy(pblk_w_rq_cache);
		up_write(&pblk_lock);
		return -ENOMEM;
	}
	up_write(&pblk_lock);

	return 0;
}
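
/*
 * Core allocations: pgs_in_buffer scales with planes and LUNs so the
 * write buffer can keep every LUN busy; the mempools guarantee forward
 * progress for work items and per-I/O contexts under memory pressure;
 * two workqueues run line closing and bad-block marking in the
 * background.
 */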
static int pblk_core_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	pblk->pgs_in_buffer = NVM_MEM_PAGE_WRITE * geo->sec_per_pg *
						geo->nr_planes * geo->nr_luns;

	if (pblk_init_global_caches(pblk))
		return -ENOMEM;

	pblk->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
	if (!pblk->page_pool)
		return -ENOMEM;

	pblk->line_ws_pool = mempool_create_slab_pool(PBLK_WS_POOL_SIZE,
							pblk_blk_ws_cache);
	if (!pblk->line_ws_pool)
		goto free_page_pool;

	pblk->rec_pool = mempool_create_slab_pool(geo->nr_luns, pblk_rec_cache);
	if (!pblk->rec_pool)
		goto free_blk_ws_pool;

	pblk->g_rq_pool = mempool_create_slab_pool(PBLK_READ_REQ_POOL_SIZE,
							pblk_g_rq_cache);
	if (!pblk->g_rq_pool)
		goto free_rec_pool;

	pblk->w_rq_pool = mempool_create_slab_pool(geo->nr_luns * 2,
							pblk_w_rq_cache);
	if (!pblk->w_rq_pool)
		goto free_g_rq_pool;

	pblk->line_meta_pool =
			mempool_create_slab_pool(PBLK_META_POOL_SIZE,
							pblk_line_meta_cache);
	if (!pblk->line_meta_pool)
		goto free_w_rq_pool;

	pblk->close_wq = alloc_workqueue("pblk-close-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_NR_CLOSE_JOBS);
	if (!pblk->close_wq)
		goto free_line_meta_pool;

	pblk->bb_wq = alloc_workqueue("pblk-bb-wq",
			WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
	if (!pblk->bb_wq)
		goto free_close_wq;

	if (pblk_set_ppaf(pblk))
		goto free_bb_wq;

	if (pblk_rwb_init(pblk))
		goto free_bb_wq;

	INIT_LIST_HEAD(&pblk->compl_list);
	return 0;

free_bb_wq:
	destroy_workqueue(pblk->bb_wq);
free_close_wq:
	destroy_workqueue(pblk->close_wq);
free_line_meta_pool:
	mempool_destroy(pblk->line_meta_pool);
free_w_rq_pool:
	mempool_destroy(pblk->w_rq_pool);
free_g_rq_pool:
	mempool_destroy(pblk->g_rq_pool);
free_rec_pool:
	mempool_destroy(pblk->rec_pool);
free_blk_ws_pool:
	mempool_destroy(pblk->line_ws_pool);
free_page_pool:
	mempool_destroy(pblk->page_pool);
	return -ENOMEM;
}

static void pblk_core_free(struct pblk *pblk)
{
	if (pblk->close_wq)
		destroy_workqueue(pblk->close_wq);

	if (pblk->bb_wq)
		destroy_workqueue(pblk->bb_wq);

	mempool_destroy(pblk->page_pool);
	mempool_destroy(pblk->line_ws_pool);
	mempool_destroy(pblk->rec_pool);
	mempool_destroy(pblk->g_rq_pool);
	mempool_destroy(pblk->w_rq_pool);
	mempool_destroy(pblk->line_meta_pool);

	kmem_cache_destroy(pblk_blk_ws_cache);
	kmem_cache_destroy(pblk_rec_cache);
	kmem_cache_destroy(pblk_g_rq_cache);
	kmem_cache_destroy(pblk_w_rq_cache);
	kmem_cache_destroy(pblk_line_meta_cache);
}

static void pblk_luns_free(struct pblk *pblk)
{
	kfree(pblk->luns);
}

static void pblk_free_line_bitmaps(struct pblk_line *line)
{
	kfree(line->blk_bitmap);
	kfree(line->erase_bitmap);
}

static void pblk_lines_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *line;
	int i;

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < l_mg->nr_lines; i++) {
		line = &pblk->lines[i];

		pblk_line_free(pblk, line);
		pblk_free_line_bitmaps(line);
	}
	spin_unlock(&l_mg->free_lock);
}

static void pblk_line_meta_free(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	int i;

	kfree(l_mg->bb_template);
	kfree(l_mg->bb_aux);
	kfree(l_mg->vsc_list);

	spin_lock(&l_mg->free_lock);
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		kfree(l_mg->sline_meta[i]);
		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
		kfree(l_mg->eline_meta[i]);
	}
	spin_unlock(&l_mg->free_lock);

	kfree(pblk->lines);
}
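
/*
 * Fetch the bad-block table for one LUN. The raw table holds one entry
 * per plane-block; nvm_bb_tbl_fold() collapses it to one entry per
 * block across planes.
 */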
static int pblk_bb_discovery(struct nvm_tgt_dev *dev, struct pblk_lun *rlun)
{
	struct nvm_geo *geo = &dev->geo;
	struct ppa_addr ppa;
	u8 *blks;
	int nr_blks, ret;

	nr_blks = geo->blks_per_lun * geo->plane_mode;
	blks = kmalloc(nr_blks, GFP_KERNEL);
	if (!blks)
		return -ENOMEM;

	ppa.ppa = 0;
	ppa.g.ch = rlun->bppa.g.ch;
	ppa.g.lun = rlun->bppa.g.lun;

	ret = nvm_get_tgt_bb_tbl(dev, ppa, blks);
	if (ret)
		goto out;

	nr_blks = nvm_bb_tbl_fold(dev->parent, blks, nr_blks);
	if (nr_blks < 0) {
		ret = nr_blks;
		goto out;
	}

	rlun->bb_list = blks;

	return 0;
out:
	kfree(blks);
	return ret;
}

static int pblk_bb_line(struct pblk *pblk, struct pblk_line *line,
			int blk_per_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int bb_cnt = 0;
	int i;

	for (i = 0; i < blk_per_line; i++) {
		rlun = &pblk->luns[i];
		if (rlun->bb_list[line->id] == NVM_BLK_T_FREE)
			continue;

		set_bit(pblk_ppa_to_pos(geo, rlun->bppa), line->blk_bitmap);
		bb_cnt++;
	}

	return bb_cnt;
}

static int pblk_alloc_line_bitmaps(struct pblk *pblk, struct pblk_line *line)
{
	struct pblk_line_meta *lm = &pblk->lm;

	line->blk_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->blk_bitmap)
		return -ENOMEM;

	line->erase_bitmap = kzalloc(lm->blk_bitmap_len, GFP_KERNEL);
	if (!line->erase_bitmap) {
		kfree(line->blk_bitmap);
		return -ENOMEM;
	}

	return 0;
}
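
/*
 * LUNs are ordered so that consecutive writes stripe across channels
 * first: with, say, 2 channels and 2 LUNs per channel, pblk->luns[0..3]
 * map to device LUN ids 0, 2, 1, 3 (lunid = lun_raw + ch *
 * luns_per_chnl below).
 */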
static int pblk_luns_init(struct pblk *pblk, struct ppa_addr *luns)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_lun *rlun;
	int i, ret;

	/* TODO: Implement unbalanced LUN support */
	if (geo->luns_per_chnl < 0) {
		pr_err("pblk: unbalanced LUN config.\n");
		return -EINVAL;
	}

	pblk->luns = kcalloc(geo->nr_luns, sizeof(struct pblk_lun), GFP_KERNEL);
	if (!pblk->luns)
		return -ENOMEM;

	for (i = 0; i < geo->nr_luns; i++) {
		/* Stripe across channels */
		int ch = i % geo->nr_chnls;
		int lun_raw = i / geo->nr_chnls;
		int lunid = lun_raw + ch * geo->luns_per_chnl;

		rlun = &pblk->luns[i];
		rlun->bppa = luns[lunid];

		sema_init(&rlun->wr_sem, 1);

		ret = pblk_bb_discovery(dev, rlun);
		if (ret) {
			while (--i >= 0)
				kfree(pblk->luns[i].bb_list);
			return ret;
		}
	}

	return 0;
}
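
/*
 * Unless the target is created with NVM_TARGET_FACTORY, attempt to
 * recover the L2P table and any open lines from the device; fall back
 * to taking a fresh first data line when there is nothing to recover.
 */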
static int pblk_lines_configure(struct pblk *pblk, int flags)
{
	struct pblk_line *line = NULL;
	int ret = 0;

	if (!(flags & NVM_TARGET_FACTORY)) {
		line = pblk_recov_l2p(pblk);
		if (IS_ERR(line)) {
			pr_err("pblk: could not recover l2p table\n");
			ret = -EFAULT;
		}
	}

	if (!line) {
		/* Configure next line for user data */
		line = pblk_line_get_first_data(pblk);
		if (!line) {
			pr_err("pblk: line list corrupted\n");
			ret = -EFAULT;
		}
	}

	return ret;
}

/* See comment over struct line_emeta definition */
static unsigned int calc_emeta_len(struct pblk *pblk)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;

	/* Round to sector size so that lba_list starts on its own sector */
	lm->emeta_sec[1] = DIV_ROUND_UP(
			sizeof(struct line_emeta) + lm->blk_bitmap_len,
			geo->sec_size);
	lm->emeta_len[1] = lm->emeta_sec[1] * geo->sec_size;

	/* Round to sector size so that vsc_list starts on its own sector */
	lm->dsec_per_line = lm->sec_per_line - lm->emeta_sec[0];
	lm->emeta_sec[2] = DIV_ROUND_UP(lm->dsec_per_line * sizeof(u64),
			geo->sec_size);
	lm->emeta_len[2] = lm->emeta_sec[2] * geo->sec_size;

	lm->emeta_sec[3] = DIV_ROUND_UP(l_mg->nr_lines * sizeof(u32),
			geo->sec_size);
	lm->emeta_len[3] = lm->emeta_sec[3] * geo->sec_size;

	lm->vsc_list_len = l_mg->nr_lines * sizeof(u32);

	return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}
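
/*
 * pblk over-provisions by a fixed 20%: with, say, 1000 free blocks, the
 * exposed capacity corresponds to 800 blocks' worth of sectors, while
 * the rate limiter still accounts for all 1000.
 */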
static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	sector_t provisioned;

	pblk->over_pct = 20;

	provisioned = nr_free_blks;
	provisioned *= (100 - pblk->over_pct);
	sector_div(provisioned, 100);

	/* Internally pblk manages all free blocks, but all calculations based
	 * on user capacity consider only provisioned blocks
	 */
	pblk->rl.total_blocks = nr_free_blks;
	pblk->rl.nr_secs = nr_free_blks * geo->sec_per_blk;
	pblk->capacity = provisioned * geo->sec_per_blk;
	atomic_set(&pblk->rl.free_blocks, nr_free_blks);
}
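
/*
 * Each of the PBLK_DATA_LINES concurrently open lines gets its own
 * smeta and emeta buffers. emeta is kmalloc'ed when it fits in
 * KMALLOC_MAX_CACHE_SIZE and vmalloc'ed otherwise; the allocation type
 * is recorded so pblk_mfree() can release it with the matching free
 * routine.
 */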
static int pblk_lines_alloc_metadata(struct pblk *pblk)
{
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int i;

	/* smeta is always small enough to fit on a kmalloc memory allocation,
	 * emeta depends on the number of LUNs allocated to the pblk instance
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		l_mg->sline_meta[i] = kmalloc(lm->smeta_len, GFP_KERNEL);
		if (!l_mg->sline_meta[i])
			goto fail_free_smeta;
	}

	/* emeta allocates three different buffers for managing metadata with
	 * in-memory and in-media layouts
	 */
	for (i = 0; i < PBLK_DATA_LINES; i++) {
		struct pblk_emeta *emeta;

		emeta = kmalloc(sizeof(struct pblk_emeta), GFP_KERNEL);
		if (!emeta)
			goto fail_free_emeta;

		if (lm->emeta_len[0] > KMALLOC_MAX_CACHE_SIZE) {
			l_mg->emeta_alloc_type = PBLK_VMALLOC_META;

			emeta->buf = vmalloc(lm->emeta_len[0]);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		} else {
			l_mg->emeta_alloc_type = PBLK_KMALLOC_META;

			emeta->buf = kmalloc(lm->emeta_len[0], GFP_KERNEL);
			if (!emeta->buf) {
				kfree(emeta);
				goto fail_free_emeta;
			}

			emeta->nr_entries = lm->emeta_sec[0];
			l_mg->eline_meta[i] = emeta;
		}
	}

	l_mg->vsc_list = kcalloc(l_mg->nr_lines, sizeof(__le32), GFP_KERNEL);
	if (!l_mg->vsc_list)
		goto fail_free_emeta;

	for (i = 0; i < l_mg->nr_lines; i++)
		l_mg->vsc_list[i] = cpu_to_le32(EMPTY_ENTRY);

	return 0;

fail_free_emeta:
	while (--i >= 0) {
		/* emeta may be kmalloc'ed or vmalloc'ed; free accordingly */
		pblk_mfree(l_mg->eline_meta[i]->buf, l_mg->emeta_alloc_type);
		kfree(l_mg->eline_meta[i]);
	}

fail_free_smeta:
	for (i = 0; i < PBLK_DATA_LINES; i++)
		kfree(l_mg->sline_meta[i]);

	return -ENOMEM;
}
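
/*
 * A line groups the blocks at the same position across all LUNs, so
 * nr_lines equals blks_per_lun and a full line holds sec_per_blk *
 * nr_luns sectors. Lines left with fewer than min_blk_line good blocks
 * cannot host metadata plus data and are retired to the bad list.
 */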
static int pblk_lines_init(struct pblk *pblk)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *line;
	unsigned int smeta_len, emeta_len;
	long nr_bad_blks, nr_free_blks;
	int bb_distance, max_write_ppas, mod;
	int i, ret;

	pblk->min_write_pgs = geo->sec_per_pl * (geo->sec_size / PAGE_SIZE);
	max_write_ppas = pblk->min_write_pgs * geo->nr_luns;
	pblk->max_write_pgs = (max_write_ppas < nvm_max_phys_sects(dev)) ?
				max_write_ppas : nvm_max_phys_sects(dev);
	pblk_set_sec_per_write(pblk, pblk->min_write_pgs);

	if (pblk->max_write_pgs > PBLK_MAX_REQ_ADDRS) {
		pr_err("pblk: cannot support device max_phys_sect\n");
		return -EINVAL;
	}

	div_u64_rem(geo->sec_per_blk, pblk->min_write_pgs, &mod);
	if (mod) {
		pr_err("pblk: bad configuration of sectors/pages\n");
		return -EINVAL;
	}

	l_mg->nr_lines = geo->blks_per_lun;
	l_mg->log_line = l_mg->data_line = NULL;
	l_mg->l_seq_nr = l_mg->d_seq_nr = 0;
	l_mg->nr_free_lines = 0;
	bitmap_zero(&l_mg->meta_bitmap, PBLK_DATA_LINES);

	lm->sec_per_line = geo->sec_per_blk * geo->nr_luns;
	lm->blk_per_line = geo->nr_luns;
	lm->blk_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->sec_bitmap_len = BITS_TO_LONGS(lm->sec_per_line) * sizeof(long);
	lm->lun_bitmap_len = BITS_TO_LONGS(geo->nr_luns) * sizeof(long);
	lm->high_thrs = lm->sec_per_line / 2;
	lm->mid_thrs = lm->sec_per_line / 4;
	lm->meta_distance = (geo->nr_luns / 2) * pblk->min_write_pgs;

	/* Calculate necessary pages for smeta. See comment over struct
	 * line_smeta definition
	 */
	i = 1;
add_smeta_page:
	lm->smeta_sec = i * geo->sec_per_pl;
	lm->smeta_len = lm->smeta_sec * geo->sec_size;

	smeta_len = sizeof(struct line_smeta) + lm->lun_bitmap_len;
	if (smeta_len > lm->smeta_len) {
		i++;
		goto add_smeta_page;
	}

	/* Calculate necessary pages for emeta. See comment over struct
	 * line_emeta definition
	 */
	i = 1;
add_emeta_page:
	lm->emeta_sec[0] = i * geo->sec_per_pl;
	lm->emeta_len[0] = lm->emeta_sec[0] * geo->sec_size;

	emeta_len = calc_emeta_len(pblk);
	if (emeta_len > lm->emeta_len[0]) {
		i++;
		goto add_emeta_page;
	}

	lm->emeta_bb = geo->nr_luns - i;
	lm->min_blk_line = 1 + DIV_ROUND_UP(lm->smeta_sec + lm->emeta_sec[0],
							geo->sec_per_blk);
	if (lm->min_blk_line > lm->blk_per_line) {
		pr_err("pblk: config. not supported. Min. LUN in line:%d\n",
							lm->blk_per_line);
		ret = -EINVAL;
		goto fail;
	}

	ret = pblk_lines_alloc_metadata(pblk);
	if (ret)
		goto fail;

	l_mg->bb_template = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_template) {
		ret = -ENOMEM;
		goto fail_free_meta;
	}

	l_mg->bb_aux = kzalloc(lm->sec_bitmap_len, GFP_KERNEL);
	if (!l_mg->bb_aux) {
		ret = -ENOMEM;
		goto fail_free_bb_template;
	}

	bb_distance = (geo->nr_luns) * geo->sec_per_pl;
	for (i = 0; i < lm->sec_per_line; i += bb_distance)
		bitmap_set(l_mg->bb_template, i, geo->sec_per_pl);

	INIT_LIST_HEAD(&l_mg->free_list);
	INIT_LIST_HEAD(&l_mg->corrupt_list);
	INIT_LIST_HEAD(&l_mg->bad_list);
	INIT_LIST_HEAD(&l_mg->gc_full_list);
	INIT_LIST_HEAD(&l_mg->gc_high_list);
	INIT_LIST_HEAD(&l_mg->gc_mid_list);
	INIT_LIST_HEAD(&l_mg->gc_low_list);
	INIT_LIST_HEAD(&l_mg->gc_empty_list);
	INIT_LIST_HEAD(&l_mg->emeta_list);

	l_mg->gc_lists[0] = &l_mg->gc_high_list;
	l_mg->gc_lists[1] = &l_mg->gc_mid_list;
	l_mg->gc_lists[2] = &l_mg->gc_low_list;

	spin_lock_init(&l_mg->free_lock);
	spin_lock_init(&l_mg->close_lock);
	spin_lock_init(&l_mg->gc_lock);

	pblk->lines = kcalloc(l_mg->nr_lines, sizeof(struct pblk_line),
								GFP_KERNEL);
	if (!pblk->lines) {
		ret = -ENOMEM;
		goto fail_free_bb_aux;
	}

	nr_free_blks = 0;
	for (i = 0; i < l_mg->nr_lines; i++) {
		int blk_in_line;

		line = &pblk->lines[i];

		line->pblk = pblk;
		line->id = i;
		line->type = PBLK_LINETYPE_FREE;
		line->state = PBLK_LINESTATE_FREE;
		line->gc_group = PBLK_LINEGC_NONE;
		line->vsc = &l_mg->vsc_list[i];
		spin_lock_init(&line->lock);

		ret = pblk_alloc_line_bitmaps(pblk, line);
		if (ret)
			goto fail_free_lines;

		nr_bad_blks = pblk_bb_line(pblk, line, lm->blk_per_line);
		if (nr_bad_blks < 0 || nr_bad_blks > lm->blk_per_line) {
			pblk_free_line_bitmaps(line);
			ret = -EINVAL;
			goto fail_free_lines;
		}

		blk_in_line = lm->blk_per_line - nr_bad_blks;
		if (blk_in_line < lm->min_blk_line) {
			line->state = PBLK_LINESTATE_BAD;
			list_add_tail(&line->list, &l_mg->bad_list);
			continue;
		}

		nr_free_blks += blk_in_line;
		atomic_set(&line->blk_in_line, blk_in_line);

		l_mg->nr_free_lines++;
		list_add_tail(&line->list, &l_mg->free_list);
	}

	pblk_set_provision(pblk, nr_free_blks);

	/* Cleanup per-LUN bad block lists - managed within lines on run-time */
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return 0;
fail_free_lines:
	while (--i >= 0)
		pblk_free_line_bitmaps(&pblk->lines[i]);
fail_free_bb_aux:
	kfree(l_mg->bb_aux);
fail_free_bb_template:
	kfree(l_mg->bb_template);
fail_free_meta:
	pblk_line_meta_free(pblk);
fail:
	for (i = 0; i < geo->nr_luns; i++)
		kfree(pblk->luns[i].bb_list);

	return ret;
}
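
/*
 * The write thread drains the write buffer to the media; the 100ms
 * timer (pblk_write_timer_fn) periodically kicks it so buffered data
 * reaches the device even when no new writes arrive.
 */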
static int pblk_writer_init(struct pblk *pblk)
{
	setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
	mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));

	pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
	if (IS_ERR(pblk->writer_ts)) {
		pr_err("pblk: could not allocate writer kthread\n");
		return PTR_ERR(pblk->writer_ts);
	}

	return 0;
}

static void pblk_writer_stop(struct pblk *pblk)
{
	/* The pipeline must be stopped and the write buffer emptied before the
	 * write thread is stopped
	 */
	WARN(pblk_rb_read_count(&pblk->rwb),
			"Stopping not fully persisted write buffer\n");
	WARN(pblk_rb_sync_count(&pblk->rwb),
			"Stopping not fully synced write buffer\n");

	if (pblk->writer_ts)
		kthread_stop(pblk->writer_ts);
	del_timer(&pblk->wtimer);
}

static void pblk_free(struct pblk *pblk)
{
	pblk_luns_free(pblk);
	pblk_lines_free(pblk);
	pblk_line_meta_free(pblk);
	pblk_core_free(pblk);
	pblk_l2p_free(pblk);

	kfree(pblk);
}

static void pblk_tear_down(struct pblk *pblk)
{
	pblk_pipeline_stop(pblk);
	pblk_writer_stop(pblk);
	pblk_rb_sync_l2p(&pblk->rwb);
	pblk_rwb_free(pblk);
	pblk_rl_free(&pblk->rl);

	pr_debug("pblk: consistent tear down\n");
}

static void pblk_exit(void *private)
{
	struct pblk *pblk = private;

	down_write(&pblk_lock);
	pblk_gc_exit(pblk);
	pblk_tear_down(pblk);
	pblk_free(pblk);
	up_write(&pblk_lock);
}

static sector_t pblk_capacity(void *private)
{
	struct pblk *pblk = private;

	return pblk->capacity * NR_PHY_IN_LOG;
}
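
/*
 * Target constructor. Initialization order matters: LUNs and their
 * bad-block tables come first since line setup consumes them; lines
 * precede core init because the line metadata cache is sized from
 * lm.sec_bitmap_len; the L2P table is sized from rl.nr_secs, which is
 * set during line init. The fail labels unwind in reverse order.
 */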
static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
		       int flags)
{
	struct nvm_geo *geo = &dev->geo;
	struct request_queue *bqueue = dev->q;
	struct request_queue *tqueue = tdisk->queue;
	struct pblk *pblk;
	int ret;

	if (dev->identity.dom & NVM_RSP_L2P) {
		pr_err("pblk: device-side L2P table not supported. (%x)\n",
							dev->identity.dom);
		return ERR_PTR(-EINVAL);
	}

	pblk = kzalloc(sizeof(struct pblk), GFP_KERNEL);
	if (!pblk)
		return ERR_PTR(-ENOMEM);

	pblk->dev = dev;
	pblk->disk = tdisk;
	pblk->state = PBLK_STATE_RUNNING;

	spin_lock_init(&pblk->trans_lock);
	spin_lock_init(&pblk->lock);

	if (flags & NVM_TARGET_FACTORY)
		pblk_setup_uuid(pblk);

#ifdef CONFIG_NVM_DEBUG
	atomic_long_set(&pblk->inflight_writes, 0);
	atomic_long_set(&pblk->padded_writes, 0);
	atomic_long_set(&pblk->padded_wb, 0);
	atomic_long_set(&pblk->nr_flush, 0);
	atomic_long_set(&pblk->req_writes, 0);
	atomic_long_set(&pblk->sub_writes, 0);
	atomic_long_set(&pblk->sync_writes, 0);
	atomic_long_set(&pblk->inflight_reads, 0);
	atomic_long_set(&pblk->cache_reads, 0);
	atomic_long_set(&pblk->sync_reads, 0);
	atomic_long_set(&pblk->recov_writes, 0);
	atomic_long_set(&pblk->recov_gc_writes, 0);
#endif

	atomic_long_set(&pblk->read_failed, 0);
	atomic_long_set(&pblk->read_empty, 0);
	atomic_long_set(&pblk->read_high_ecc, 0);
	atomic_long_set(&pblk->read_failed_gc, 0);
	atomic_long_set(&pblk->write_failed, 0);
	atomic_long_set(&pblk->erase_failed, 0);

	ret = pblk_luns_init(pblk, dev->luns);
	if (ret) {
		pr_err("pblk: could not initialize luns\n");
		goto fail;
	}

	ret = pblk_lines_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize lines\n");
		goto fail_free_luns;
	}

	ret = pblk_core_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize core\n");
		goto fail_free_line_meta;
	}

	ret = pblk_l2p_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize maps\n");
		goto fail_free_core;
	}

	ret = pblk_lines_configure(pblk, flags);
	if (ret) {
		pr_err("pblk: could not configure lines\n");
		goto fail_free_l2p;
	}

	ret = pblk_writer_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize write thread\n");
		goto fail_free_lines;
	}

	ret = pblk_gc_init(pblk);
	if (ret) {
		pr_err("pblk: could not initialize gc\n");
		goto fail_stop_writer;
	}

	/* inherit the size from the underlying device */
	blk_queue_logical_block_size(tqueue, queue_physical_block_size(bqueue));
	blk_queue_max_hw_sectors(tqueue, queue_max_hw_sectors(bqueue));

	blk_queue_write_cache(tqueue, true, false);

	tqueue->limits.discard_granularity = geo->pgs_per_blk * geo->pfpg_size;
	tqueue->limits.discard_alignment = 0;
	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);

	pr_info("pblk init: luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
			geo->nr_luns, pblk->l_mg.nr_lines,
			(unsigned long long)pblk->rl.nr_secs,
			pblk->rwb.nr_entries);

	wake_up_process(pblk->writer_ts);
	return pblk;

fail_stop_writer:
	pblk_writer_stop(pblk);
fail_free_lines:
	pblk_lines_free(pblk);
fail_free_l2p:
	pblk_l2p_free(pblk);
fail_free_core:
	pblk_core_free(pblk);
fail_free_line_meta:
	pblk_line_meta_free(pblk);
fail_free_luns:
	pblk_luns_free(pblk);
fail:
	kfree(pblk);
	return ERR_PTR(ret);
}

/* physical block device target */
static struct nvm_tgt_type tt_pblk = {
	.name		= "pblk",
	.version	= {1, 0, 0},

	.make_rq	= pblk_make_rq,
	.capacity	= pblk_capacity,

	.init		= pblk_init,
	.exit		= pblk_exit,

	.sysfs_init	= pblk_sysfs_init,
	.sysfs_exit	= pblk_sysfs_exit,
};

static int __init pblk_module_init(void)
{
	int ret;

	pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
	if (!pblk_bio_set)
		return -ENOMEM;
	ret = nvm_register_tgt_type(&tt_pblk);
	if (ret)
		bioset_free(pblk_bio_set);
	return ret;
}

static void pblk_module_exit(void)
{
	bioset_free(pblk_bio_set);
	nvm_unregister_tgt_type(&tt_pblk);
}

module_init(pblk_module_init);
module_exit(pblk_module_exit);
MODULE_AUTHOR("Javier Gonzalez <javier@cnexlabs.com>");
MODULE_AUTHOR("Matias Bjorling <matias@cnexlabs.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Physical Block-Device for Open-Channel SSDs");